patch (string, lengths 17-31.2k) | y (int64, 1-1) | oldf (string, lengths 0-2.21M) | idx (int64, 1-1) | id (int64, 4.29k-68.4k) | msg (string, lengths 8-843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values) |
---|---|---|---|---|---|---|---|
@@ -0,0 +1,15 @@
+#include <impl/Kokkos_Tools.hpp>
+
+int main(int argc, char* argv[]) {
+ Kokkos::Tools::initialize(argc, argv);
+ Kokkos::Tools::pushRegion(
+ "The unanimous Declaration of the thirteen united States of America, "
+ "When in the Course of human events, it becomes necessary for one people "
+ "to dissolve the political bands which have connected them with another, "
+ "and to assume among the powers of the earth, the separate and equal "
+ "station to which the Laws of Nature and of Nature's God entitle them, a "
+ "decent respect to the opinions of mankind requires that they should "
+ "declare the causes which impel them to the separation.");
+ Kokkos::Tools::popRegion();
+ Kokkos::Tools::finalize();
+} | 1 | 1 | 31,362 | Missing the license | kokkos-kokkos | cpp |
|
@@ -210,11 +210,9 @@ func (p *ReplicationTaskProcessorImpl) eventLoop() {
))
case <-cleanupTimer.C:
- if p.config.EnableCleanupReplicationTask() {
- if err := p.cleanupReplicationTasks(); err != nil {
- p.logger.Error("Failed to clean up replication messages.", tag.Error(err))
- p.metricsClient.Scope(metrics.ReplicationTaskCleanupScope).IncCounter(metrics.ReplicationTaskCleanupFailure)
- }
+ if err := p.cleanupReplicationTasks(); err != nil {
+ p.logger.Error("Failed to clean up replication messages.", tag.Error(err))
+ p.metricsClient.Scope(metrics.ReplicationTaskCleanupScope).IncCounter(metrics.ReplicationTaskCleanupFailure)
}
cleanupTimer.Reset(backoff.JitDuration(
p.config.ReplicationTaskProcessorCleanupInterval(shardID), | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination replicationTaskProcessor_mock.go -self_package go.temporal.io/server/service/history
package history
import (
"context"
"fmt"
"sync/atomic"
"time"
"go.temporal.io/api/serviceerror"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/api/historyservice/v1"
persistencespb "go.temporal.io/server/api/persistence/v1"
replicationspb "go.temporal.io/server/api/replication/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/backoff"
"go.temporal.io/server/common/collection"
"go.temporal.io/server/common/convert"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives/timestamp"
"go.temporal.io/server/common/quotas"
serviceerrors "go.temporal.io/server/common/serviceerror"
"go.temporal.io/server/service/history/configs"
"go.temporal.io/server/service/history/shard"
)
const (
dropSyncShardTaskTimeThreshold = 10 * time.Minute
replicationTimeout = 30 * time.Second
taskErrorRetryBackoffCoefficient = 1.2
taskErrorRetryMaxInterval = 5 * time.Second
)
var (
// ErrUnknownReplicationTask is the error to indicate unknown replication task type
ErrUnknownReplicationTask = serviceerror.NewInvalidArgument("unknown replication task")
)
type (
// ReplicationTaskProcessorImpl is responsible for processing replication tasks for a shard.
ReplicationTaskProcessorImpl struct {
currentCluster string
sourceCluster string
status int32
shard shard.Context
historyEngine shard.Engine
historySerializer persistence.PayloadSerializer
config *configs.Config
metricsClient metrics.Client
logger log.Logger
replicationTaskExecutor replicationTaskExecutor
hostRateLimiter *quotas.DynamicRateLimiter
shardRateLimiter *quotas.DynamicRateLimiter
taskRetryPolicy backoff.RetryPolicy
dlqRetryPolicy backoff.RetryPolicy
// send side
minTxAckedTaskID int64
// recv side
maxRxProcessedTaskID int64
requestChan chan<- *replicationTaskRequest
syncShardChan chan *replicationspb.SyncShardStatus
shutdownChan chan struct{}
}
// ReplicationTaskProcessor is responsible for processing replication tasks for a shard.
ReplicationTaskProcessor interface {
common.Daemon
}
replicationTaskRequest struct {
token *replicationspb.ReplicationToken
respChan chan<- *replicationspb.ReplicationMessages
}
)
// NewReplicationTaskProcessor creates a new replication task processor.
func NewReplicationTaskProcessor(
shard shard.Context,
historyEngine shard.Engine,
config *configs.Config,
metricsClient metrics.Client,
replicationTaskFetcher ReplicationTaskFetcher,
replicationTaskExecutor replicationTaskExecutor,
) *ReplicationTaskProcessorImpl {
shardID := shard.GetShardID()
taskRetryPolicy := backoff.NewExponentialRetryPolicy(config.ReplicationTaskProcessorErrorRetryWait(shardID))
taskRetryPolicy.SetBackoffCoefficient(taskErrorRetryBackoffCoefficient)
taskRetryPolicy.SetMaximumInterval(taskErrorRetryMaxInterval)
taskRetryPolicy.SetMaximumAttempts(config.ReplicationTaskProcessorErrorRetryMaxAttempts(shardID))
dlqRetryPolicy := backoff.NewExponentialRetryPolicy(config.ReplicationTaskProcessorErrorRetryWait(shardID))
dlqRetryPolicy.SetBackoffCoefficient(taskErrorRetryBackoffCoefficient)
dlqRetryPolicy.SetMaximumInterval(taskErrorRetryMaxInterval)
dlqRetryPolicy.SetMaximumAttempts(config.ReplicationTaskProcessorErrorRetryMaxAttempts(shardID))
return &ReplicationTaskProcessorImpl{
currentCluster: shard.GetClusterMetadata().GetCurrentClusterName(),
sourceCluster: replicationTaskFetcher.GetSourceCluster(),
status: common.DaemonStatusInitialized,
shard: shard,
historyEngine: historyEngine,
historySerializer: persistence.NewPayloadSerializer(),
config: config,
metricsClient: metricsClient,
logger: shard.GetLogger(),
replicationTaskExecutor: replicationTaskExecutor,
hostRateLimiter: replicationTaskFetcher.GetRateLimiter(),
shardRateLimiter: quotas.NewDynamicRateLimiter(func() float64 {
return config.ReplicationTaskProcessorShardQPS()
}), taskRetryPolicy: taskRetryPolicy,
requestChan: replicationTaskFetcher.GetRequestChan(),
syncShardChan: make(chan *replicationspb.SyncShardStatus, 1),
shutdownChan: make(chan struct{}),
minTxAckedTaskID: persistence.EmptyQueueMessageID,
maxRxProcessedTaskID: persistence.EmptyQueueMessageID,
}
}
// Start starts the processor
func (p *ReplicationTaskProcessorImpl) Start() {
if !atomic.CompareAndSwapInt32(
&p.status,
common.DaemonStatusInitialized,
common.DaemonStatusStarted,
) {
return
}
go p.eventLoop()
p.logger.Info("ReplicationTaskProcessor started.")
}
// Stop stops the processor
func (p *ReplicationTaskProcessorImpl) Stop() {
if !atomic.CompareAndSwapInt32(
&p.status,
common.DaemonStatusStarted,
common.DaemonStatusStopped,
) {
return
}
close(p.shutdownChan)
p.logger.Info("ReplicationTaskProcessor shutting down.")
}
func (p *ReplicationTaskProcessorImpl) eventLoop() {
shardID := p.shard.GetShardID()
syncShardTimer := time.NewTimer(backoff.JitDuration(
p.config.ShardSyncMinInterval(),
p.config.ShardSyncTimerJitterCoefficient(),
))
defer syncShardTimer.Stop()
cleanupTimer := time.NewTimer(backoff.JitDuration(
p.config.ReplicationTaskProcessorCleanupInterval(shardID),
p.config.ReplicationTaskProcessorCleanupJitterCoefficient(shardID),
))
defer cleanupTimer.Stop()
var syncShardTask *replicationspb.SyncShardStatus
for {
select {
case syncShardTask = <-p.syncShardChan:
case <-syncShardTimer.C:
if err := p.handleSyncShardStatus(syncShardTask); err != nil {
p.logger.Error("unable to sync shard status", tag.Error(err))
p.metricsClient.Scope(metrics.HistorySyncShardStatusScope).IncCounter(metrics.SyncShardFromRemoteFailure)
}
syncShardTimer.Reset(backoff.JitDuration(
p.config.ShardSyncMinInterval(),
p.config.ShardSyncTimerJitterCoefficient(),
))
case <-cleanupTimer.C:
if p.config.EnableCleanupReplicationTask() {
if err := p.cleanupReplicationTasks(); err != nil {
p.logger.Error("Failed to clean up replication messages.", tag.Error(err))
p.metricsClient.Scope(metrics.ReplicationTaskCleanupScope).IncCounter(metrics.ReplicationTaskCleanupFailure)
}
}
cleanupTimer.Reset(backoff.JitDuration(
p.config.ReplicationTaskProcessorCleanupInterval(shardID),
p.config.ReplicationTaskProcessorCleanupJitterCoefficient(shardID),
))
case <-p.shutdownChan:
return
default:
if err := p.pollProcessReplicationTasks(); err != nil {
p.logger.Error("unable to process replication tasks", tag.Error(err))
}
}
}
}
func (p *ReplicationTaskProcessorImpl) pollProcessReplicationTasks() error {
taskIterator := collection.NewPagingIterator(p.paginationFn)
count := 0
for taskIterator.HasNext() && !p.isStopped() {
task, err := taskIterator.Next()
if err != nil {
return err
}
count++
replicationTask := task.(*replicationspb.ReplicationTask)
if err = p.applyReplicationTask(replicationTask); err != nil {
return err
}
p.maxRxProcessedTaskID = replicationTask.GetSourceTaskId()
}
// TODO there should be better handling of remote not having replication tasks
// & make the application of replication task evenly distributed (in terms of time)
// stream / long poll API worth considering
if count == 0 {
time.Sleep(p.config.ReplicationTaskProcessorNoTaskRetryWait(p.shard.GetShardID()))
}
return nil
}
func (p *ReplicationTaskProcessorImpl) applyReplicationTask(
replicationTask *replicationspb.ReplicationTask,
) error {
err := p.handleReplicationTask(replicationTask)
if err == nil || p.isStopped() {
return err
}
p.logger.Error(
"failed to apply replication task after retry",
tag.TaskID(replicationTask.GetSourceTaskId()),
tag.Error(err),
)
request, err := p.convertTaskToDLQTask(replicationTask)
if err != nil {
p.logger.Error("failed to generate DLQ replication task", tag.Error(err))
return nil
}
if err := p.handleReplicationDLQTask(request); err != nil {
return err
}
return nil
}
func (p *ReplicationTaskProcessorImpl) handleSyncShardStatus(
status *replicationspb.SyncShardStatus,
) error {
now := p.shard.GetTimeSource().Now()
if status == nil {
return nil
} else if now.Sub(timestamp.TimeValue(status.GetStatusTime())) > dropSyncShardTaskTimeThreshold {
return nil
}
p.metricsClient.Scope(metrics.HistorySyncShardStatusScope).IncCounter(metrics.SyncShardFromRemoteCounter)
ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout)
defer cancel()
return p.historyEngine.SyncShardStatus(ctx, &historyservice.SyncShardStatusRequest{
SourceCluster: p.sourceCluster,
ShardId: p.shard.GetShardID(),
StatusTime: status.StatusTime,
})
}
func (p *ReplicationTaskProcessorImpl) handleReplicationTask(
replicationTask *replicationspb.ReplicationTask,
) error {
// TODO create a dedicated multi-stage rate limiter
ctx := context.Background()
_ = p.shardRateLimiter.Wait(ctx)
_ = p.hostRateLimiter.Wait(ctx)
operation := func() error {
scope, err := p.replicationTaskExecutor.execute(replicationTask, false)
p.emitTaskMetrics(scope, err)
return err
}
return backoff.Retry(operation, p.taskRetryPolicy, p.isRetryableError)
}
func (p *ReplicationTaskProcessorImpl) handleReplicationDLQTask(
request *persistence.PutReplicationTaskToDLQRequest,
) error {
// TODO create a dedicated multi-stage rate limiter
ctx := context.Background()
_ = p.shardRateLimiter.Wait(ctx)
_ = p.hostRateLimiter.Wait(ctx)
p.logger.Info("enqueue replication task to DLQ",
tag.WorkflowNamespaceID(request.TaskInfo.GetNamespaceId()),
tag.WorkflowID(request.TaskInfo.GetWorkflowId()),
tag.WorkflowRunID(request.TaskInfo.GetRunId()),
tag.TaskID(request.TaskInfo.GetTaskId()),
)
p.metricsClient.Scope(
metrics.ReplicationDLQStatsScope,
metrics.TargetClusterTag(p.sourceCluster),
metrics.InstanceTag(convert.Int32ToString(p.shard.GetShardID())),
).UpdateGauge(
metrics.ReplicationDLQMaxLevelGauge,
float64(request.TaskInfo.GetTaskId()),
)
// The following is guaranteed to succeed or retry forever until the processor is shut down.
return backoff.Retry(func() error {
err := p.shard.GetExecutionManager().PutReplicationTaskToDLQ(request)
if err != nil {
p.logger.Error("failed to enqueue replication task to DLQ", tag.Error(err))
p.metricsClient.IncCounter(metrics.ReplicationTaskFetcherScope, metrics.ReplicationDLQFailed)
}
return err
}, p.dlqRetryPolicy, p.isRetryableError)
}
func (p *ReplicationTaskProcessorImpl) convertTaskToDLQTask(
replicationTask *replicationspb.ReplicationTask,
) (*persistence.PutReplicationTaskToDLQRequest, error) {
switch replicationTask.TaskType {
case enumsspb.REPLICATION_TASK_TYPE_SYNC_ACTIVITY_TASK:
taskAttributes := replicationTask.GetSyncActivityTaskAttributes()
return &persistence.PutReplicationTaskToDLQRequest{
SourceClusterName: p.sourceCluster,
TaskInfo: &persistencespb.ReplicationTaskInfo{
NamespaceId: taskAttributes.GetNamespaceId(),
WorkflowId: taskAttributes.GetWorkflowId(),
RunId: taskAttributes.GetRunId(),
TaskId: replicationTask.GetSourceTaskId(),
TaskType: enumsspb.TASK_TYPE_REPLICATION_SYNC_ACTIVITY,
ScheduledId: taskAttributes.GetScheduledId(),
Version: taskAttributes.GetVersion(),
},
}, nil
case enumsspb.REPLICATION_TASK_TYPE_HISTORY_V2_TASK:
taskAttributes := replicationTask.GetHistoryTaskV2Attributes()
eventsDataBlob := persistence.NewDataBlobFromProto(taskAttributes.GetEvents())
events, err := p.historySerializer.DeserializeEvents(eventsDataBlob)
if err != nil {
return nil, err
}
if len(events) == 0 {
p.logger.Error("Empty events in a batch")
return nil, fmt.Errorf("corrupted history event batch, empty events")
}
firstEvent := events[0]
lastEvent := events[len(events)-1]
return &persistence.PutReplicationTaskToDLQRequest{
SourceClusterName: p.sourceCluster,
TaskInfo: &persistencespb.ReplicationTaskInfo{
NamespaceId: taskAttributes.GetNamespaceId(),
WorkflowId: taskAttributes.GetWorkflowId(),
RunId: taskAttributes.GetRunId(),
TaskId: replicationTask.GetSourceTaskId(),
TaskType: enumsspb.TASK_TYPE_REPLICATION_HISTORY,
FirstEventId: firstEvent.GetEventId(),
NextEventId: lastEvent.GetEventId(),
Version: firstEvent.GetVersion(),
},
}, nil
default:
return nil, fmt.Errorf("unknown replication task type")
}
}
func (p *ReplicationTaskProcessorImpl) paginationFn(_ []byte) ([]interface{}, []byte, error) {
respChan := make(chan *replicationspb.ReplicationMessages, 1)
p.requestChan <- &replicationTaskRequest{
token: &replicationspb.ReplicationToken{
ShardId: p.shard.GetShardID(),
LastProcessedMessageId: p.maxRxProcessedTaskID,
LastRetrievedMessageId: p.maxRxProcessedTaskID,
},
respChan: respChan,
}
select {
case resp, ok := <-respChan:
if !ok {
return nil, nil, nil
}
select {
case p.syncShardChan <- resp.GetSyncShardStatus():
default:
// channel full, it is ok to drop the sync shard status
// since sync shard status are periodically updated
}
var tasks []interface{}
for _, task := range resp.GetReplicationTasks() {
tasks = append(tasks, task)
}
return tasks, nil, nil
case <-p.shutdownChan:
return nil, nil, nil
}
}
func (p *ReplicationTaskProcessorImpl) cleanupReplicationTasks() error {
clusterMetadata := p.shard.GetClusterMetadata()
currentCluster := clusterMetadata.GetCurrentClusterName()
var minAckedTaskID *int64
for clusterName, clusterInfo := range clusterMetadata.GetAllClusterInfo() {
if !clusterInfo.Enabled || clusterName == currentCluster {
continue
}
ackLevel := p.shard.GetClusterReplicationLevel(clusterName)
if minAckedTaskID == nil || ackLevel < *minAckedTaskID {
minAckedTaskID = &ackLevel
}
}
if minAckedTaskID == nil || *minAckedTaskID <= p.minTxAckedTaskID {
return nil
}
p.logger.Info("cleaning up replication task queue", tag.ReadLevel(*minAckedTaskID))
p.metricsClient.Scope(metrics.ReplicationTaskCleanupScope).IncCounter(metrics.ReplicationTaskCleanupCount)
p.metricsClient.Scope(
metrics.ReplicationTaskFetcherScope,
metrics.TargetClusterTag(p.currentCluster),
).RecordTimer(
metrics.ReplicationTasksLag,
time.Duration(p.shard.GetTransferMaxReadLevel()-*minAckedTaskID),
)
err := p.shard.GetExecutionManager().RangeCompleteReplicationTask(
&persistence.RangeCompleteReplicationTaskRequest{
InclusiveEndTaskID: *minAckedTaskID,
},
)
if err == nil {
p.minTxAckedTaskID = *minAckedTaskID
}
return err
}
func (p *ReplicationTaskProcessorImpl) emitTaskMetrics(scope int, err error) {
if common.IsContextDeadlineExceededErr(err) || common.IsContextCanceledErr(err) {
p.metricsClient.IncCounter(scope, metrics.ServiceErrContextTimeoutCounter)
return
}
// Also update counter to distinguish between type of failures
switch err.(type) {
case nil:
p.metricsClient.IncCounter(scope, metrics.ReplicationTasksApplied)
case *serviceerrors.ShardOwnershipLost:
p.metricsClient.IncCounter(scope, metrics.ServiceErrShardOwnershipLostCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicatorFailures)
case *serviceerror.InvalidArgument:
p.metricsClient.IncCounter(scope, metrics.ServiceErrInvalidArgumentCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicatorFailures)
case *serviceerror.NamespaceNotActive:
p.metricsClient.IncCounter(scope, metrics.ServiceErrNamespaceNotActiveCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicatorFailures)
case *serviceerror.WorkflowExecutionAlreadyStarted:
p.metricsClient.IncCounter(scope, metrics.ServiceErrExecutionAlreadyStartedCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicatorFailures)
case *serviceerror.NotFound:
p.metricsClient.IncCounter(scope, metrics.ServiceErrNotFoundCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicatorFailures)
case *serviceerror.ResourceExhausted:
p.metricsClient.IncCounter(scope, metrics.ServiceErrResourceExhaustedCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicatorFailures)
case *serviceerrors.RetryReplication:
p.metricsClient.IncCounter(scope, metrics.ServiceErrRetryTaskCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicatorFailures)
default:
p.metricsClient.IncCounter(scope, metrics.ReplicatorFailures)
}
}
func (p *ReplicationTaskProcessorImpl) isStopped() bool {
return atomic.LoadInt32(&p.status) == common.DaemonStatusStopped
}
func (p *ReplicationTaskProcessorImpl) isRetryableError(
err error,
) bool {
if p.isStopped() {
return false
}
switch err.(type) {
case *serviceerror.InvalidArgument:
return false
default:
return true
}
}
| 1 | 10,928 | Looks like this was previously guarded by the `EnableCleanupReplicationTask` flag. Now it looks like this is always needed. Just want to confirm: the intention is that if `GlobalNamespace` is enabled, we want to run cleanupReplicationTasks in all cases? | temporalio-temporal | go |
@@ -37,7 +37,6 @@
#include "tbb/tbb.h"
#include <tbb/task_arena.h>
#include <tbb/task_scheduler_observer.h>
- #include <tbb/task_scheduler_init.h>
#include <tbb/parallel_reduce.h>
#include <tbb/blocked_range.h>
#include <tbb/tick_count.h> | 1 | /* file: service_thread_pinner.cpp */
/*******************************************************************************
* Copyright 2014-2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/*
//++
// Implementation of thread pinner class
//--
*/
#include "services/daal_defines.h"
#if !(defined DAAL_THREAD_PINNING_DISABLED)
#include "src/threading/service_thread_pinner.h"
#include "services/daal_memory.h"
#include "src/threading/threading.h"
#if defined(__DO_TBB_LAYER__)
#define USE_TASK_ARENA_CURRENT_SLOT 1
#define LOG_PINNING 1
#define TBB_PREVIEW_TASK_ARENA 1
#define TBB_PREVIEW_LOCAL_OBSERVER 1
#include "tbb/tbb.h"
#include <tbb/task_arena.h>
#include <tbb/task_scheduler_observer.h>
#include <tbb/task_scheduler_init.h>
#include <tbb/parallel_reduce.h>
#include <tbb/blocked_range.h>
#include <tbb/tick_count.h>
#include <tbb/scalable_allocator.h>
#include "services/daal_atomic_int.h"
using namespace daal::services;
#if defined(_WIN32) || defined(_WIN64)
#include <Windows.h>
#define __PINNER_WINDOWS__
#if defined(_WIN64)
#define MASK_WIDTH 64
#else
#define MASK_WIDTH 32
#endif
#else // LINUX
#include <sched.h>
#define __PINNER_LINUX__
#ifdef __FreeBSD__
#include <pthread_np.h>
cpu_set_t * __sched_cpualloc(size_t count)
{
return (cpu_set_t *)malloc(CPU_ALLOC_SIZE(count));
}
void __sched_cpufree(cpu_set_t * set)
{
free(set);
}
int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t * mask)
{
return cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, pid == 0 ? -1 : pid, cpusetsize, mask);
}
#endif
#endif
struct cpu_mask_t
{
int status;
#if defined(_WIN32) || defined(_WIN64)
GROUP_AFFINITY ga;
#else
int ncpus;
int bit_parts_size;
cpu_set_t * cpu_set;
#endif
cpu_mask_t()
{
status = 0;
#if defined __PINNER_LINUX__
ncpus = 0;
bit_parts_size = 0;
cpu_set = NULL;
for (ncpus = sizeof(cpu_set_t) / CHAR_BIT; ncpus < 16 * 1024; ncpus <<= 1)
{
cpu_set = CPU_ALLOC(ncpus);
if (cpu_set == NULL) break;
bit_parts_size = CPU_ALLOC_SIZE(ncpus);
CPU_ZERO_S(bit_parts_size, cpu_set);
const int err = sched_getaffinity(0, bit_parts_size, cpu_set);
if (err == 0) break;
CPU_FREE(cpu_set);
cpu_set = NULL;
if (errno != EINVAL) break;
}
if (cpu_set == NULL)
#else // defined __PINNER_WINDOWS__
bool retval = GetThreadGroupAffinity(GetCurrentThread(), &ga);
if (!retval)
#endif
{
status--;
}
return;
}
int get_thread_affinity()
{
if (status == 0)
{
#if defined __PINNER_LINUX__
int err = pthread_getaffinity_np(pthread_self(), bit_parts_size, cpu_set);
if (err)
#else // defined __PINNER_WINDOWS__
bool retval = GetThreadGroupAffinity(GetCurrentThread(), &ga);
if (!retval)
#endif
{
status--;
}
}
return status;
} // int get_thread_affinity()
int set_thread_affinity()
{
if (status == 0)
{
#if defined __PINNER_LINUX__
int err = pthread_setaffinity_np(pthread_self(), bit_parts_size, cpu_set);
if (err)
#else // defined __PINNER_WINDOWS__
bool retval = SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL);
if (!retval)
#endif
{
status--;
}
}
return status;
} // int set_thread_affinity()
int set_cpu_index(int cpu_idx)
{
if (status == 0)
{
#if defined __PINNER_LINUX__
CPU_ZERO_S(bit_parts_size, cpu_set);
CPU_SET_S(cpu_idx, bit_parts_size, cpu_set);
#else // defined __PINNER_WINDOWS__
ga.Group = cpu_idx / MASK_WIDTH;
ga.Mask = cpu_idx % MASK_WIDTH;
#endif
}
return status;
} // int set_cpu_index(int cpu_idx)
int get_status() { return status; } // int get_status()
~cpu_mask_t()
{
#if defined __PINNER_LINUX__
if (cpu_set != NULL)
{
CPU_FREE(cpu_set);
}
#endif
return;
} // ~cpu_mask_t()
};
class thread_pinner_impl_t : public tbb::task_scheduler_observer
{
int status;
int nthreads;
int max_threads;
int * cpu_queue;
bool do_pinning;
AtomicInt is_pinning;
tbb::enumerable_thread_specific<cpu_mask_t *> thread_mask;
tbb::task_arena pinner_arena;
void (*topo_deleter)(void *);
public:
thread_pinner_impl_t(void (*read_topo)(int &, int &, int &, int **), void (*deleter)(void *));
void on_scheduler_entry(bool);
void on_scheduler_exit(bool);
void init_thread_pinner(int statusToSet, int nthreadsToSet, int max_threadsToSet, int * cpu_queueToSet);
void execute(daal::services::internal::thread_pinner_task_t & task)
{
if (do_pinning && (status == 0) && (is_pinning.get() == 0))
{
is_pinning.set(1);
pinner_arena.execute(task);
is_pinning.set(0);
}
else
{
task();
}
}
int get_status();
bool get_pinning();
bool set_pinning(bool p);
~thread_pinner_impl_t();
} * IMPL;
thread_pinner_impl_t::thread_pinner_impl_t(void (*read_topo)(int &, int &, int &, int **), void (*deleter)(void *))
: pinner_arena(nthreads = daal::threader_get_threads_number()), tbb::task_scheduler_observer(pinner_arena), topo_deleter(deleter)
{
do_pinning = (nthreads > 0) ? true : false;
is_pinning.set(0);
read_topo(status, nthreads, max_threads, &cpu_queue);
observe(true);
return;
} /* thread_pinner_impl_t() */
void thread_pinner_impl_t::on_scheduler_entry(bool) /*override*/
{
if (do_pinning == false || status < 0) return;
// read current thread index
const int thr_idx = tbb::this_task_arena::current_thread_index();
// Get next cpu from topology queue
int cpu_idx = cpu_queue[thr_idx % max_threads];
// Allocate source and target affinity masks
cpu_mask_t * target_mask = new cpu_mask_t;
cpu_mask_t * source_mask = thread_mask.local();
// Create source mask if it wasn't created for the thread before
if (source_mask == NULL)
{
source_mask = new cpu_mask_t();
thread_mask.local() = source_mask;
}
// save source affinity mask to restore on exit
status -= source_mask->get_thread_affinity();
// Set one bit corresponding to the CPU to pin the thread
status -= target_mask->set_cpu_index(cpu_idx);
// Set thread affinity mask to 1 non-zero bit in corresponding to cpu_idx position
status -= target_mask->set_thread_affinity();
delete target_mask;
return;
} /* void on_scheduler_entry() */
void thread_pinner_impl_t::on_scheduler_exit(bool) /*override*/
{
if (do_pinning == false || status < 0) return;
// get current thread original mask
cpu_mask_t * source_mask = thread_mask.local();
if (source_mask == NULL)
{
status--;
return;
}
else
{
// restore original thread affinity mask
status -= source_mask->set_thread_affinity();
if (status < 0)
{
status--;
return;
}
}
return;
} /* void on_scheduler_exit( bool ) */
int thread_pinner_impl_t::get_status()
{
return status;
} /* int get_status() */
bool thread_pinner_impl_t::get_pinning()
{
return do_pinning;
} /* bool get_pinning() */
bool thread_pinner_impl_t::set_pinning(bool p)
{
bool old_pinning = do_pinning;
if (status == 0) do_pinning = p;
return old_pinning;
} /* bool set_pinning(bool p) */
thread_pinner_impl_t::~thread_pinner_impl_t()
{
observe(false);
if (cpu_queue) topo_deleter(cpu_queue);
thread_mask.combine_each([](cpu_mask_t *& source_mask) { delete source_mask; });
return;
} /* ~thread_pinner_impl_t() */
DAAL_EXPORT void * _getThreadPinner(bool create_pinner, void (*read_topo)(int &, int &, int &, int **), void (*deleter)(void *))
{
static bool pinner_created = false;
if (create_pinner == true || pinner_created == true)
{
static daal::services::internal::thread_pinner_t thread_pinner(read_topo, deleter);
if (thread_pinner.get_status() == 0)
{
pinner_created = true;
return (void *)&thread_pinner;
}
}
return NULL;
} /* thread_pinner_t* getThreadPinner() */
DAAL_EXPORT void _thread_pinner_thread_pinner_init(void (*read_topo)(int &, int &, int &, int **), void (*deleter)(void *))
{
static thread_pinner_impl_t impl(read_topo, deleter);
IMPL = &impl;
}
DAAL_EXPORT void _thread_pinner_execute(daal::services::internal::thread_pinner_task_t & task)
{
IMPL->execute(task);
}
DAAL_EXPORT int _thread_pinner_get_status()
{
return IMPL->get_status();
}
DAAL_EXPORT bool _thread_pinner_get_pinning()
{
return IMPL->get_pinning();
}
DAAL_EXPORT bool _thread_pinner_set_pinning(bool p)
{
return IMPL->set_pinning(p);
}
DAAL_EXPORT void _thread_pinner_on_scheduler_entry(bool p)
{
IMPL->on_scheduler_entry(p);
}
DAAL_EXPORT void _thread_pinner_on_scheduler_exit(bool p)
{
IMPL->on_scheduler_exit(p);
}
#else /* if __DO_TBB_LAYER__ is not defined */
DAAL_EXPORT void * _getThreadPinner(bool create_pinner, void (*read_topo)(int &, int &, int &, int **), void (*deleter)(void *))
{
return NULL;
}
DAAL_EXPORT void _thread_pinner_thread_pinner_init(void (*f)(int &, int &, int &, int **), void (*deleter)(void *)) {}
DAAL_EXPORT void _thread_pinner_execute(daal::services::internal::thread_pinner_task_t & task)
{
task();
}
DAAL_EXPORT bool _thread_pinner_get_pinning()
{
return false;
}
DAAL_EXPORT bool _thread_pinner_set_pinning(bool p)
{
return true;
}
DAAL_EXPORT int _thread_pinner_get_status()
{
return 0;
}
DAAL_EXPORT void _thread_pinner_on_scheduler_entry(bool p) {}
DAAL_EXPORT void _thread_pinner_on_scheduler_exit(bool p) {}
#endif /* if __DO_TBB_LAYER__ is not defined */
#endif /* #if !defined (DAAL_THREAD_PINNING_DISABLED) */
| 1 | 23,331 | Potentially it'll be good to remove all of them excluding "tbb/tbb.h". But let's do it next time. | oneapi-src-oneDAL | cpp |
@@ -1210,3 +1210,19 @@ def is_nan(x):
return np.isnan(x)
except:
return False
+
+
+def bound_range(vals, density):
+ """
+ Computes a bounding range from a number of evenly spaced samples.
+ Will raise an error if samples are not evenly spaced within
+ tolerance.
+ """
+ low, high = vals.min(), vals.max()
+ invert = False
+ if vals[0] > vals[1]:
+ invert = True
+ if not density:
+ density = round(1./((high-low)/(len(vals)-1)), sys.float_info.dig)
+ halfd = 0.5/density
+ return low-halfd, high+halfd, density, invert | 1 | import os, sys, warnings, operator
import numbers
import itertools
import string, fnmatch
import unicodedata
import datetime as dt
from collections import defaultdict, Counter
import numpy as np
import param
import json
try:
from cyordereddict import OrderedDict
except:
from collections import OrderedDict
datetime_types = (np.datetime64, dt.datetime)
try:
import pandas as pd # noqa (optional import)
datetime_types = datetime_types + (pd.tslib.Timestamp,)
except ImportError:
pd = None
try:
import dask.dataframe as dd
except ImportError:
dd = None
class HashableJSON(json.JSONEncoder):
"""
Extends JSONEncoder to generate a hashable string for as many types
of object as possible including nested objects and objects that are
not normally hashable. The purpose of this class is to generate
unique strings that once hashed are suitable for use in memoization
and other cases where deep equality must be tested without storing
the entire object.
By default JSONEncoder supports booleans, numbers, strings, lists,
tuples and dictionaries. In order to support other types such as
sets, datetime objects and mutable objects such as pandas Dataframes
or numpy arrays, HashableJSON has to convert these types to
datastructures that can normally be represented as JSON.
Support for other object types may need to be introduced in
future. By default, unrecognized object types are represented by
their id.
One limitation of this approach is that dictionaries with composite
keys (e.g tuples) are not supported due to the JSON spec.
"""
string_hashable = (dt.datetime,)
repr_hashable = ()
def default(self, obj):
if isinstance(obj, set):
return hash(frozenset(obj))
elif isinstance(obj, np.ndarray):
return obj.tolist()
if pd and isinstance(obj, (pd.Series, pd.DataFrame)):
return repr(sorted(list(obj.to_dict().items())))
elif isinstance(obj, self.string_hashable):
return str(obj)
elif isinstance(obj, self.repr_hashable):
return repr(obj)
try:
return hash(obj)
except:
return id(obj)
def deephash(obj):
"""
Given an object, return a hash using HashableJSON. This hash is not
architecture, Python version or platform independent.
"""
try:
return hash(json.dumps(obj, cls=HashableJSON, sort_keys=True))
except:
return None
# Python3 compatibility
import types
if sys.version_info.major == 3:
basestring = str
unicode = str
generator_types = (zip, range, types.GeneratorType)
else:
basestring = basestring
unicode = unicode
from itertools import izip
generator_types = (izip, xrange, types.GeneratorType)
def process_ellipses(obj, key, vdim_selection=False):
"""
Helper function to pad a __getitem__ key with the right number of
empty slices (i.e :) when the key contains an Ellipsis (...).
If the vdim_selection flag is true, check if the end of the key
contains strings or Dimension objects in obj. If so, extra padding
will not be applied for the value dimensions (i.e the resulting key
will be exactly one longer than the number of kdims). Note: this
flag should not be used for composite types.
"""
if isinstance(key, np.ndarray) and key.dtype.kind == 'b':
return key
wrapped_key = wrap_tuple(key)
if wrapped_key.count(Ellipsis)== 0:
return key
if wrapped_key.count(Ellipsis)!=1:
raise Exception("Only one ellipsis allowed at a time.")
dim_count = len(obj.dimensions())
index = wrapped_key.index(Ellipsis)
head = wrapped_key[:index]
tail = wrapped_key[index+1:]
padlen = dim_count - (len(head) + len(tail))
if vdim_selection:
# If the end of the key (i.e the tail) is in vdims, pad to len(kdims)+1
if wrapped_key[-1] in obj.vdims:
padlen = (len(obj.kdims) +1 ) - len(head+tail)
return head + ((slice(None),) * padlen) + tail
def bytes_to_unicode(value):
"""
Safely casts bytestring to unicode
"""
if isinstance(value, bytes):
return unicode(value.decode('utf-8'))
return value
def capitalize_unicode_name(s):
"""
Turns a string such as 'capital delta' into the shortened,
capitalized version, in this case simply 'Delta'. Used as a
transform in sanitize_identifier.
"""
index = s.find('capital')
if index == -1: return s
tail = s[index:].replace('capital', '').strip()
tail = tail[0].upper() + tail[1:]
return s[:index] + tail
class Aliases(object):
"""
Helper class useful for defining a set of alias tuples on a single object.
For instance, when defining a group or label with an alias, instead
of setting tuples in the constructor, you could use
``aliases.water`` if you first define:
>>> aliases = Aliases(water='H_2O', glucose='C_6H_{12}O_6')
>>> aliases.water
('water', 'H_2O')
This may be used to conveniently define aliases for groups, labels
or dimension names.
"""
def __init__(self, **kwargs):
for k,v in kwargs.items():
setattr(self, k, (k,v))
class sanitize_identifier_fn(param.ParameterizedFunction):
"""
Sanitizes group/label values for use in AttrTree attribute
access. Depending on the version parameter, either sanitization
appropriate for Python 2 (no unicode identifiers allowed) or
Python 3 (some unicode allowed) is used.
Note that if you are using Python 3, you can switch to version 2
for compatibility but you cannot enable relaxed sanitization if
you are using Python 2.
Special characters are sanitized using their (lowercase) unicode
name using the unicodedata module. For instance:
>>> unicodedata.name(u'$').lower()
'dollar sign'
As these names are often very long, this parameterized function
allows filtering, substitutions and transforms to help shorten these
names appropriately.
"""
version = param.ObjectSelector(sys.version_info.major, objects=[2,3], doc="""
The sanitization version. If set to 2, more aggressive
sanitization appropriate for Python 2 is applied. Otherwise,
if set to 3, more relaxed, Python 3 sanitization is used.""")
capitalize = param.Boolean(default=True, doc="""
Whether the first letter should be converted to
uppercase. Note, this will only be applied to ASCII characters
in order to make sure paths aren't confused with method
names.""")
eliminations = param.List(['extended', 'accent', 'small', 'letter', 'sign', 'digit',
'latin', 'greek', 'arabic-indic', 'with', 'dollar'], doc="""
Lowercase strings to be eliminated from the unicode names in
order to shorten the sanitized name ( lowercase). Redundant
strings should be removed but too much elimination could cause
two unique strings to map to the same sanitized output.""")
substitutions = param.Dict(default={'circumflex':'power',
'asterisk':'times',
'solidus':'over'}, doc="""
Lowercase substitutions of substrings in unicode names. For
instance the ^ character has the name 'circumflex accent' even
though it is more typically used for exponentiation. Note that
substitutions occur after filtering and that there should be no
ordering dependence between substitutions.""")
transforms = param.List(default=[capitalize_unicode_name], doc="""
List of string transformation functions to apply after
filtering and substitution in order to further compress the
unicode name. For instance, the default capitalize_unicode_name
function will turn the string "capital delta" into "Delta".""")
disallowed = param.List(default=['trait_names', '_ipython_display_',
'_getAttributeNames'], doc="""
An explicit list of name that should not be allowed as
attribute names on Tree objects.
By default, prevents IPython from creating an entry called
Trait_names due to an inconvenient getattr check (during
tab-completion).""")
disable_leading_underscore = param.Boolean(default=False, doc="""
Whether leading underscores should be allowed to be sanitized
with the leading prefix.""")
aliases = param.Dict(default={}, doc="""
A dictionary of aliases mapping long strings to their short,
sanitized equivalents""")
prefix = 'A_'
_lookup_table = param.Dict(default={}, doc="""
Cache of previously computed sanitizations""")
@param.parameterized.bothmethod
def add_aliases(self_or_cls, **kwargs):
"""
Conveniently add new aliases as keyword arguments. For instance
you can add a new alias with add_aliases(short='Longer string')
"""
self_or_cls.aliases.update({v:k for k,v in kwargs.items()})
@param.parameterized.bothmethod
def remove_aliases(self_or_cls, aliases):
"""
Remove a list of aliases.
"""
for k,v in self_or_cls.aliases.items():
if v in aliases:
self_or_cls.aliases.pop(k)
@param.parameterized.bothmethod
def allowable(self_or_cls, name, disable_leading_underscore=None):
disabled_reprs = ['javascript', 'jpeg', 'json', 'latex',
'latex', 'pdf', 'png', 'svg', 'markdown']
disabled_ = (self_or_cls.disable_leading_underscore
if disable_leading_underscore is None
else disable_leading_underscore)
if disabled_ and name.startswith('_'):
return False
isrepr = any(('_repr_%s_' % el) == name for el in disabled_reprs)
return (name not in self_or_cls.disallowed) and not isrepr
@param.parameterized.bothmethod
def prefixed(self, identifier, version):
"""
Whether or not the identifier will be prefixed.
Strings that require the prefix are generally not recommended.
"""
invalid_starting = ['Mn', 'Mc', 'Nd', 'Pc']
if identifier.startswith('_'): return True
return((identifier[0] in string.digits) if version==2
else (unicodedata.category(identifier[0]) in invalid_starting))
@param.parameterized.bothmethod
def remove_diacritics(self_or_cls, identifier):
"""
Remove diacritics and accents from the input leaving other
unicode characters alone."""
chars = ''
for c in identifier:
replacement = unicodedata.normalize('NFKD', c).encode('ASCII', 'ignore')
if replacement != '':
chars += bytes_to_unicode(replacement)
else:
chars += c
return chars
@param.parameterized.bothmethod
def shortened_character_name(self_or_cls, c, eliminations=[], substitutions={}, transforms=[]):
"""
Given a unicode character c, return the shortened unicode name
(as a list of tokens) by applying the eliminations,
substitutions and transforms.
"""
name = unicodedata.name(c).lower()
# Filtering
for elim in eliminations:
name = name.replace(elim, '')
# Substitution
for i,o in substitutions.items():
name = name.replace(i, o)
for transform in transforms:
name = transform(name)
return ' '.join(name.strip().split()).replace(' ','_').replace('-','_')
def __call__(self, name, escape=True, version=None):
if name in [None, '']:
return name
elif name in self.aliases:
return self.aliases[name]
elif name in self._lookup_table:
return self._lookup_table[name]
name = bytes_to_unicode(name)
version = self.version if version is None else version
if not self.allowable(name):
raise AttributeError("String %r is in the disallowed list of attribute names: %r" % self.disallowed)
if version == 2:
name = self.remove_diacritics(name)
if self.capitalize and name and name[0] in string.ascii_lowercase:
name = name[0].upper()+name[1:]
sanitized = (self.sanitize_py2(name) if version==2 else self.sanitize_py3(name))
if self.prefixed(name, version):
sanitized = self.prefix + sanitized
self._lookup_table[name] = sanitized
return sanitized
def _process_underscores(self, tokens):
"Strip underscores to make sure the number is correct after join"
groups = [[str(''.join(el))] if b else list(el)
for (b,el) in itertools.groupby(tokens, lambda k: k=='_')]
flattened = [el for group in groups for el in group]
processed = []
for token in flattened:
if token == '_': continue
if token.startswith('_'):
token = str(token[1:])
if token.endswith('_'):
token = str(token[:-1])
processed.append(token)
return processed
def sanitize_py2(self, name):
# This fix works but masks an issue in self.sanitize (py2)
prefix = '_' if name.startswith('_') else ''
valid_chars = string.ascii_letters+string.digits+'_'
return prefix + str('_'.join(self.sanitize(name, lambda c: c in valid_chars)))
def sanitize_py3(self, name):
if not name.isidentifier():
return '_'.join(self.sanitize(name, lambda c: ('_'+c).isidentifier()))
else:
return name
def sanitize(self, name, valid_fn):
"Accumulate blocks of hex and separate blocks by underscores"
invalid = {'\a':'a','\b':'b', '\v':'v','\f':'f','\r':'r'}
for cc in filter(lambda el: el in name, invalid.keys()):
raise Exception("Please use a raw string or escape control code '\%s'"
% invalid[cc])
sanitized, chars = [], ''
for split in name.split():
for c in split:
if valid_fn(c): chars += str(c) if c=='_' else c
else:
short = self.shortened_character_name(c, self.eliminations,
self.substitutions,
self.transforms)
sanitized.extend([chars] if chars else [])
if short != '':
sanitized.append(short)
chars = ''
if chars:
sanitized.extend([chars])
chars=''
return self._process_underscores(sanitized + ([chars] if chars else []))
sanitize_identifier = sanitize_identifier_fn.instance()
group_sanitizer = sanitize_identifier_fn.instance()
label_sanitizer = sanitize_identifier_fn.instance()
dimension_sanitizer = sanitize_identifier_fn.instance(capitalize=False)
def isnumeric(val):
if isinstance(val, (basestring, bool, np.bool_)):
return False
try:
float(val)
return True
except:
return False
def find_minmax(lims, olims):
"""
Takes (a1, a2) and (b1, b2) as input and returns
(np.nanmin(a1, b1), np.nanmax(a2, b2)). Used to calculate
min and max values of a number of items.
"""
try:
limzip = zip(list(lims), list(olims), [np.nanmin, np.nanmax])
limits = tuple([float(fn([l, ol])) for l, ol, fn in limzip])
except:
limits = (np.NaN, np.NaN)
return limits
def find_range(values, soft_range=[]):
"""
Safely finds either the numerical min and max of
a set of values, falling back to the first and
the last value in the sorted list of values.
"""
try:
values = np.array(values)
values = np.squeeze(values) if len(values.shape) > 1 else values
if len(soft_range):
values = np.concatenate([values, soft_range])
if values.dtype.kind == 'M':
return values.min(), values.max()
return np.nanmin(values), np.nanmax(values)
except:
try:
values = sorted(values)
return (values[0], values[-1])
except:
return (None, None)
def max_range(ranges):
"""
Computes the maximal lower and upper bounds from a list of bounds.
"""
try:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
values = [r for r in ranges for v in r if v is not None]
if pd and all(isinstance(v, pd.tslib.Timestamp) for r in values for v in r):
values = [(v1.to_datetime64(), v2.to_datetime64()) for v1, v2 in values]
arr = np.array(values)
if arr.dtype.kind in 'OSU':
arr = np.sort([v for v in arr.flat if not is_nan(v)])
return arr[0], arr[-1]
if arr.dtype.kind in 'M':
return arr[:, 0].min(), arr[:, 1].max()
return (np.nanmin(arr[:, 0]), np.nanmax(arr[:, 1]))
except:
return (np.NaN, np.NaN)
def max_extents(extents, zrange=False):
"""
Computes the maximal extent in 2D and 3D space from
list of 4-tuples or 6-tuples. If zrange is enabled
all extents are converted to 6-tuples to compute
x-, y- and z-limits.
"""
if zrange:
num = 6
inds = [(0, 3), (1, 4), (2, 5)]
extents = [e if len(e) == 6 else (e[0], e[1], None,
e[2], e[3], None)
for e in extents]
else:
num = 4
inds = [(0, 2), (1, 3)]
arr = list(zip(*extents)) if extents else []
extents = [np.NaN] * num
if len(arr) == 0:
return extents
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for lidx, uidx in inds:
lower = [v for v in arr[lidx] if v is not None and not is_nan(v)]
upper = [v for v in arr[uidx] if v is not None and not is_nan(v)]
if lower and isinstance(lower[0], datetime_types):
extents[lidx] = np.min(lower)
elif any(isinstance(l, basestring) for l in lower):
extents[lidx] = np.sort(lower)[0]
elif lower:
extents[lidx] = np.nanmin(lower)
if upper and isinstance(upper[0], datetime_types):
extents[uidx] = np.max(upper)
elif any(isinstance(u, basestring) for u in upper):
extents[uidx] = np.sort(upper)[-1]
elif upper:
extents[uidx] = np.nanmax(upper)
return tuple(extents)
def int_to_alpha(n, upper=True):
"Generates alphanumeric labels of form A-Z, AA-ZZ etc."
casenum = 65 if upper else 97
label = ''
count= 0
if n == 0: return str(chr(n + casenum))
while n >= 0:
mod, div = n % 26, n
for _ in range(count):
div //= 26
div %= 26
if count == 0:
val = mod
else:
val = div
label += str(chr(val + casenum))
count += 1
n -= 26**count
return label[::-1]
def int_to_roman(input):
if type(input) != type(1):
raise TypeError("expected integer, got %s" % type(input))
if not 0 < input < 4000:
raise ValueError("Argument must be between 1 and 3999")
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
result = ""
for i in range(len(ints)):
count = int(input / ints[i])
result += nums[i] * count
input -= ints[i] * count
return result
def unique_iterator(seq):
"""
Returns an iterator containing all non-duplicate elements
in the input sequence.
"""
seen = set()
for item in seq:
if item not in seen:
seen.add(item)
yield item
def unique_array(arr):
"""
Returns an array of unique values in the input order
"""
if not len(arr):
return arr
elif pd:
return pd.unique(arr)
else:
_, uniq_inds = np.unique(arr, return_index=True)
return arr[np.sort(uniq_inds)]
def match_spec(element, specification):
"""
Matches the group.label specification of the supplied
element against the supplied specification dictionary
returning the value of the best match.
"""
match_tuple = ()
match = specification.get((), {})
for spec in [type(element).__name__,
group_sanitizer(element.group, escape=False),
label_sanitizer(element.label, escape=False)]:
match_tuple += (spec,)
if match_tuple in specification:
match = specification[match_tuple]
return match
def python2sort(x,key=None):
if len(x) == 0: return x
it = iter(x)
groups = [[next(it)]]
for item in it:
for group in groups:
try:
item_precedence = item if key is None else key(item)
group_precedence = group[0] if key is None else key(group[0])
item_precedence < group_precedence # exception if not comparable
group.append(item)
break
except TypeError:
continue
else: # did not break, make new group
groups.append([item])
return itertools.chain.from_iterable(sorted(group, key=key) for group in groups)
def merge_dimensions(dimensions_list):
"""
Merges lists of fully or partially overlapping dimensions by
merging their values.
>>> from holoviews import Dimension
>>> dim_list = [[Dimension('A', values=[1, 2, 3]), Dimension('B')],
... [Dimension('A', values=[2, 3, 4])]]
>>> dimensions = merge_dimensions(dim_list)
>>> dimensions
[Dimension('A'), Dimension('B')]
>>> dimensions[0].values
[1, 2, 3, 4]
"""
dvalues = defaultdict(list)
dimensions = []
for dims in dimensions_list:
for d in dims:
dvalues[d.name].append(d.values)
if d not in dimensions:
dimensions.append(d)
dvalues = {k: list(unique_iterator(itertools.chain(*vals)))
for k, vals in dvalues.items()}
return [d(values=dvalues.get(d.name, [])) for d in dimensions]
def dimension_sort(odict, kdims, vdims, categorical, key_index, cached_values):
"""
Sorts data by key using usual Python tuple sorting semantics
or sorts in categorical order for any categorical Dimensions.
"""
sortkws = {}
ndims = len(kdims)
dimensions = kdims+vdims
indexes = [(dimensions[i], int(i not in range(ndims)),
i if i in range(ndims) else i-ndims)
for i in key_index]
cached_values = {d: [None]+vals for d, vals in cached_values.items()}
if len(set(key_index)) != len(key_index):
raise ValueError("Cannot sort on duplicated dimensions")
elif categorical:
sortkws['key'] = lambda x: tuple(cached_values[dim.name].index(x[t][d])
if dim.values else x[t][d]
for i, (dim, t, d) in enumerate(indexes))
elif key_index != list(range(len(kdims+vdims))):
sortkws['key'] = lambda x: tuple(x[t][d] for _, t, d in indexes)
if sys.version_info.major == 3:
return python2sort(odict.items(), **sortkws)
else:
return sorted(odict.items(), **sortkws)
# Copied from param should make param version public
def is_number(obj):
if isinstance(obj, numbers.Number): return True
# The extra check is for classes that behave like numbers, such as those
# found in numpy, gmpy, etc.
elif (hasattr(obj, '__int__') and hasattr(obj, '__add__')): return True
# This is for older versions of gmpy
elif hasattr(obj, 'qdiv'): return True
else: return False
class ProgressIndicator(param.Parameterized):
"""
Baseclass for any ProgressIndicator that indicates progress
as a completion percentage.
"""
percent_range = param.NumericTuple(default=(0.0, 100.0), doc="""
The total percentage spanned by the progress bar when called
with a value between 0% and 100%. This allows an overall
completion in percent to be broken down into smaller sub-tasks
that individually complete to 100 percent.""")
label = param.String(default='Progress', allow_None=True, doc="""
The label of the current progress bar.""")
def __call__(self, completion):
raise NotImplementedError
def sort_topologically(graph):
"""
Stackless topological sorting.
graph = {
3: [1],
5: [3],
4: [2],
6: [4],
}
sort_topologically(graph)
[[1, 2], [3, 4], [5, 6]]
"""
levels_by_name = {}
names_by_level = defaultdict(list)
def add_level_to_name(name, level):
levels_by_name[name] = level
names_by_level[level].append(name)
def walk_depth_first(name):
stack = [name]
while(stack):
name = stack.pop()
if name in levels_by_name:
continue
if name not in graph or not graph[name]:
level = 0
add_level_to_name(name, level)
continue
children = graph[name]
children_not_calculated = [child for child in children if child not in levels_by_name]
if children_not_calculated:
stack.append(name)
stack.extend(children_not_calculated)
continue
level = 1 + max(levels_by_name[lname] for lname in children)
add_level_to_name(name, level)
for name in graph:
walk_depth_first(name)
return list(itertools.takewhile(lambda x: x is not None,
(names_by_level.get(i, None)
for i in itertools.count())))
def is_cyclic(graph):
"""
Return True if the directed graph g has a cycle. The directed graph
should be represented as a dictionary mapping of edges for each node.
"""
path = set()
def visit(vertex):
path.add(vertex)
for neighbour in graph.get(vertex, ()):
if neighbour in path or visit(neighbour):
return True
path.remove(vertex)
return False
return any(visit(v) for v in graph)
def one_to_one(graph, nodes):
"""
Return True if graph contains only one to one mappings. The
directed graph should be represented as a dictionary mapping of
edges for each node. Nodes should be passed a simple list.
"""
edges = itertools.chain.from_iterable(graph.values())
return len(graph) == len(nodes) and len(set(edges)) == len(nodes)
def get_overlay_spec(o, k, v):
"""
Gets the type.group.label + key spec from an Element in an Overlay.
"""
k = wrap_tuple(k)
return ((type(v).__name__, v.group, v.label) + k if len(o.kdims) else
(type(v).__name__,) + k)
def layer_sort(hmap):
"""
Find a global ordering for layers in a HoloMap of CompositeOverlay
types.
"""
orderings = {}
for o in hmap:
okeys = [get_overlay_spec(o, k, v) for k, v in o.data.items()]
if len(okeys) == 1 and not okeys[0] in orderings:
orderings[okeys[0]] = []
else:
orderings.update({k: [] if k == v else [v] for k, v in zip(okeys[1:], okeys)})
return [i for g in sort_topologically(orderings) for i in sorted(g)]
def layer_groups(ordering, length=2):
"""
Splits a global ordering of Layers into groups based on a slice of
the spec. The grouping behavior can be modified by changing the
length of spec the entries are grouped by.
"""
group_orderings = defaultdict(list)
for el in ordering:
group_orderings[el[:length]].append(el)
return group_orderings
def group_select(selects, length=None, depth=None):
"""
Given a list of key tuples to select, groups them into sensible
chunks to avoid duplicating indexing operations.
"""
if length == None and depth == None:
length = depth = len(selects[0])
getter = operator.itemgetter(depth-length)
if length > 1:
selects = sorted(selects, key=getter)
grouped_selects = defaultdict(dict)
for k, v in itertools.groupby(selects, getter):
grouped_selects[k] = group_select(list(v), length-1, depth)
return grouped_selects
else:
return list(selects)
def iterative_select(obj, dimensions, selects, depth=None):
"""
Takes the output of group_select selecting subgroups iteratively,
avoiding duplicating select operations.
"""
ndims = len(dimensions)
depth = depth if depth is not None else ndims
items = []
if isinstance(selects, dict):
for k, v in selects.items():
items += iterative_select(obj.select(**{dimensions[ndims-depth]: k}),
dimensions, v, depth-1)
else:
for s in selects:
items.append((s, obj.select(**{dimensions[-1]: s[-1]})))
return items
def get_spec(obj):
"""
Gets the spec from any labeled data object.
"""
return (obj.__class__.__name__,
obj.group, obj.label)
def find_file(folder, filename):
"""
Find a file given folder and filename. If the filename can be
resolved directly returns otherwise walks the supplied folder.
"""
matches = []
if os.path.isabs(filename) and os.path.isfile(filename):
return filename
for root, _, filenames in os.walk(folder):
for fn in fnmatch.filter(filenames, filename):
matches.append(os.path.join(root, fn))
if not matches:
raise IOError('File %s could not be found' % filename)
return matches[-1]
def is_dataframe(data):
"""
Checks whether the supplied data is of DataFrame type.
"""
return((pd is not None and isinstance(data, pd.DataFrame)) or
(dd is not None and isinstance(data, dd.DataFrame)))
def get_param_values(data):
params = dict(kdims=data.kdims, vdims=data.vdims,
label=data.label)
if (data.group != data.params()['group'].default and not
isinstance(type(data).group, property)):
params['group'] = data.group
return params
def get_ndmapping_label(ndmapping, attr):
"""
Function to get the first non-auxiliary object
label attribute from an NdMapping.
"""
label = None
els = itervalues(ndmapping.data)
while label is None:
try:
el = next(els)
except StopIteration:
return None
if not el._auxiliary_component:
label = getattr(el, attr)
if attr == 'group':
tp = type(el).__name__
if tp == label:
return None
return label
def wrap_tuple(unwrapped):
""" Wraps any non-tuple types in a tuple """
return (unwrapped if isinstance(unwrapped, tuple) else (unwrapped,))
def stream_name_mapping(stream, exclude_params=['name'], reverse=False):
"""
Return a complete dictionary mapping between stream parameter names
to their applicable renames, excluding parameters listed in
exclude_params.
If reverse is True, the mapping is from the renamed strings to the
original stream parameter names.
"""
filtered = [k for k in stream.params().keys() if k not in exclude_params]
mapping = {k:stream._rename.get(k,k) for k in filtered}
if reverse:
return {v:k for k,v in mapping.items()}
else:
return mapping
def rename_stream_kwargs(stream, kwargs, reverse=False):
"""
Given a stream and a kwargs dictionary of parameter values, map to
the corresponding dictionary where the keys are substituted with the
appropriately renamed string.
If reverse, the output will be a dictionary using the original
parameter names given a dictionary using the renamed equivalents.
"""
mapped_kwargs = {}
mapping = stream_name_mapping(stream, reverse=reverse)
for k,v in kwargs.items():
if k not in mapping:
msg = 'Could not map key {key} {direction} renamed equivalent'
direction = 'from' if reverse else 'to'
raise KeyError(msg.format(key=repr(k), direction=direction))
mapped_kwargs[mapping[k]] = v
return mapped_kwargs
def stream_parameters(streams, no_duplicates=True, exclude=['name']):
"""
Given a list of streams, return a flat list of parameter name,
excluding those listed in the exclude list.
If no_duplicates is enabled, a KeyError will be raised if there are
parameter name clashes across the streams.
"""
param_groups = [s.contents.keys() for s in streams]
names = [name for group in param_groups for name in group]
if no_duplicates:
clashes = set([n for n in names if names.count(n) > 1])
if clashes:
raise KeyError('Parameter name clashes for keys: %r' % clashes)
return [name for name in names if name not in exclude]
def dimensionless_contents(streams, kdims, no_duplicates=True):
"""
Return a list of stream parameters that have not been associated
with any of the key dimensions.
"""
names = stream_parameters(streams, no_duplicates)
return [name for name in names if name not in kdims]
def unbound_dimensions(streams, kdims, no_duplicates=True):
"""
Return a list of dimensions that have not been associated with
any streams.
"""
params = stream_parameters(streams, no_duplicates)
return [d for d in kdims if d not in params]
def wrap_tuple_streams(unwrapped, kdims, streams):
"""
Fills in tuple keys with dimensioned stream values as appropriate.
"""
param_groups = [(s.contents.keys(), s) for s in streams]
pairs = [(name,s) for (group, s) in param_groups for name in group]
substituted = []
for pos,el in enumerate(wrap_tuple(unwrapped)):
if el is None and pos < len(kdims):
matches = [(name,s) for (name,s) in pairs if name==kdims[pos].name]
if len(matches) == 1:
(name, stream) = matches[0]
el = stream.contents[name]
substituted.append(el)
return tuple(substituted)
def drop_streams(streams, kdims, keys):
"""
    Drop any dimensioned streams from the keys and kdims.
"""
stream_params = stream_parameters(streams)
inds, dims = zip(*[(ind, kdim) for ind, kdim in enumerate(kdims)
if kdim not in stream_params])
return dims, [tuple(key[ind] for ind in inds) for key in keys]
def itervalues(obj):
"Get value iterator from dictionary for Python 2 and 3"
return iter(obj.values()) if sys.version_info.major == 3 else obj.itervalues()
def iterkeys(obj):
"Get key iterator from dictionary for Python 2 and 3"
return iter(obj.keys()) if sys.version_info.major == 3 else obj.iterkeys()
def get_unique_keys(ndmapping, dimensions):
inds = [ndmapping.get_dimension_index(dim) for dim in dimensions]
getter = operator.itemgetter(*inds)
return unique_iterator(getter(key) if len(inds) > 1 else (key[inds[0]],)
for key in ndmapping.data.keys())
def unpack_group(group, getter):
for k, v in group.iterrows():
obj = v.values[0]
key = getter(k)
if hasattr(obj, 'kdims'):
yield (key, obj)
else:
obj = tuple(v)
yield (wrap_tuple(key), obj)
def capitalize(string):
"""
Capitalizes the first letter of a string.
"""
return string[0].upper() + string[1:]
def get_path(item):
"""
    Gets a path from a Labelled object or from a tuple of an existing
path and a labelled object. The path strings are sanitized and
capitalized.
"""
sanitizers = [group_sanitizer, label_sanitizer]
if isinstance(item, tuple):
path, item = item
if item.label:
if len(path) > 1 and item.label == path[1]:
path = path[:2]
else:
path = path[:1] + (item.label,)
else:
path = path[:1]
else:
path = (item.group, item.label) if item.label else (item.group,)
return tuple(capitalize(fn(p)) for (p, fn) in zip(path, sanitizers))
def make_path_unique(path, counts):
"""
    Given a path and a dictionary of counts for the existing paths,
    returns a unique path by appending a Roman numeral suffix if the
    path has already been used.
"""
while path in counts:
count = counts[path]
counts[path] += 1
path = path + (int_to_roman(count),)
if len(path) == 1:
path = path + (int_to_roman(counts.get(path, 1)),)
return path
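# Illustrative example (int_to_roman is defined elsewhere in this module):
#   make_path_unique(('Curve',), {('Curve',): 2})
# returns ('Curve', 'II') and bumps the count to {('Curve',): 3}.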
class ndmapping_groupby(param.ParameterizedFunction):
"""
Apply a groupby operation to an NdMapping, using pandas to improve
performance (if available).
"""
def __call__(self, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
try:
import pandas # noqa (optional import)
groupby = self.groupby_pandas
except:
groupby = self.groupby_python
return groupby(ndmapping, dimensions, container_type,
group_type, sort=sort, **kwargs)
@param.parameterized.bothmethod
def groupby_pandas(self_or_cls, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
if 'kdims' in kwargs:
idims = [ndmapping.get_dimension(d) for d in kwargs['kdims']]
else:
idims = [dim for dim in ndmapping.kdims if dim not in dimensions]
all_dims = [d.name for d in ndmapping.kdims]
inds = [ndmapping.get_dimension_index(dim) for dim in idims]
getter = operator.itemgetter(*inds) if inds else lambda x: tuple()
multi_index = pd.MultiIndex.from_tuples(ndmapping.keys(), names=all_dims)
df = pd.DataFrame(list(map(wrap_tuple, ndmapping.values())), index=multi_index)
kwargs = dict(dict(get_param_values(ndmapping), kdims=idims), **kwargs)
groups = ((wrap_tuple(k), group_type(OrderedDict(unpack_group(group, getter)), **kwargs))
for k, group in df.groupby(level=[d.name for d in dimensions]))
if sort:
selects = list(get_unique_keys(ndmapping, dimensions))
groups = sorted(groups, key=lambda x: selects.index(x[0]))
return container_type(groups, kdims=dimensions)
@param.parameterized.bothmethod
def groupby_python(self_or_cls, ndmapping, dimensions, container_type,
group_type, sort=False, **kwargs):
idims = [dim for dim in ndmapping.kdims if dim not in dimensions]
dim_names = [dim.name for dim in dimensions]
selects = get_unique_keys(ndmapping, dimensions)
selects = group_select(list(selects))
groups = [(k, group_type((v.reindex(idims) if hasattr(v, 'kdims')
else [((), (v,))]), **kwargs))
for k, v in iterative_select(ndmapping, dim_names, selects)]
return container_type(groups, kdims=dimensions)
def cartesian_product(arrays, flat=True, copy=False):
"""
Efficient cartesian product of a list of 1D arrays returning the
    expanded array views for each dimension. By default arrays are
flattened, which may be controlled with the flat flag. The array
views can be turned into regular arrays with the copy flag.
"""
arrays = np.broadcast_arrays(*np.ix_(*arrays))
if flat:
return tuple(arr.flatten() if copy else arr.flat for arr in arrays)
return tuple(arr.copy() if copy else arr for arr in arrays)
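# Illustrative example (flat=True, copy=True assumed for concreteness):
#   xs, ys = cartesian_product([np.array([1, 2]), np.array([10, 20])], copy=True)
# gives xs == array([1, 1, 2, 2]) and ys == array([10, 20, 10, 20]).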
def arglexsort(arrays):
"""
Returns the indices of the lexicographical sorting
order of the supplied arrays.
"""
dtypes = ','.join(array.dtype.str for array in arrays)
recarray = np.empty(len(arrays[0]), dtype=dtypes)
for i, array in enumerate(arrays):
recarray['f%s' % i] = array
return recarray.argsort()
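# Illustrative example: sorting lexicographically by two key arrays
#   arglexsort([np.array([1, 1, 0]), np.array([2, 1, 3])])
# returns array([2, 1, 0]), i.e. the keys (0, 3), (1, 1), (1, 2) in order.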
def get_dynamic_item(map_obj, dimensions, key):
"""
Looks up an item in a DynamicMap given a list of dimensions
and a corresponding key. The dimensions must be a subset
of the map_obj key dimensions.
"""
dmaps = map_obj.traverse(lambda x: x, ['DynamicMap'])
dmap = dmaps[0] if dmaps else map_obj
if key == () and (not dimensions or not dmap.kdims):
map_obj.traverse(lambda x: x[()], ['DynamicMap'])
return key, map_obj.map(lambda x: x.last, ['DynamicMap'])
elif isinstance(key, tuple):
dims = {d.name: k for d, k in zip(dimensions, key)
if d in map_obj.kdims}
key = tuple(dims.get(d.name) for d in map_obj.kdims)
el = map_obj.select(['DynamicMap', 'HoloMap'], **dims)
else:
el = None
return key, el
def expand_grid_coords(dataset, dim):
"""
Expand the coordinates along a dimension of the gridded
dataset into an ND-array matching the dimensionality of
the dataset.
"""
arrays = [dataset.interface.coords(dataset, d.name, True)
for d in dataset.kdims]
idx = dataset.get_dimension_index(dim)
return cartesian_product(arrays, flat=False)[idx]
def dt64_to_dt(dt64):
"""
Safely converts NumPy datetime64 to a datetime object.
"""
ts = (dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
return dt.datetime.utcfromtimestamp(ts)
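# Illustrative example (timezone-naive input assumed):
#   dt64_to_dt(np.datetime64('2017-01-01T12:00:00'))
# returns datetime.datetime(2017, 1, 1, 12, 0).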
def is_nan(x):
"""
Checks whether value is NaN on arbitrary types
"""
try:
return np.isnan(x)
except:
return False
| 1 | 16,013 | I would just mention which tolerance - namely as reported by ``sys.float_info`` | holoviz-holoviews | py |
@@ -18,8 +18,9 @@ class ProposalsController < ApplicationController
def index
@CLOSED_PROPOSAL_LIMIT = 10
+
@pending_data = self.listing.pending
- @approved_data = self.listing.approved(@CLOSED_PROPOSAL_LIMIT)
+ @approved_data = self.listing.approved.alter_query{ |rel| rel.limit(@CLOSED_PROPOSAL_LIMIT) }
@cancelled_data = self.listing.cancelled
end
| 1 | class ProposalsController < ApplicationController
include TokenAuth
before_filter :authenticate_user!, except: :approve
# TODO use Policy for all actions
before_filter ->{authorize self.proposal}, only: [:show, :cancel, :cancel_form]
before_filter :needs_token_on_get, only: :approve
before_filter :validate_access, only: :approve
helper_method :display_status
add_template_helper ProposalsHelper
rescue_from Pundit::NotAuthorizedError, with: :auth_errors
def show
@proposal = self.proposal.decorate
@show_comments = true
@include_comments_files = true
end
def index
@CLOSED_PROPOSAL_LIMIT = 10
@pending_data = self.listing.pending
@approved_data = self.listing.approved(@CLOSED_PROPOSAL_LIMIT)
@cancelled_data = self.listing.cancelled
end
def archive
@proposals_data = self.listing.closed
end
def cancel_form
@proposal = self.proposal.decorate
end
def cancel
if params[:reason_input].present?
proposal = Proposal.find params[:id]
comments = "Request cancelled with comments: " + params[:reason_input]
proposal.cancel!
proposal.comments.create!(comment_text: comments, user_id: current_user.id)
flash[:success] = "Your request has been cancelled"
redirect_to proposal_path, id: proposal.id
Dispatcher.new.deliver_cancellation_emails(proposal)
else
redirect_to cancel_form_proposal_path, id: params[:id],
alert: "A reason for cancellation is required.
Please indicate why this request needs
to be cancelled."
end
end
def approve
approval = self.proposal.existing_approval_for(current_user)
if approval.user.delegates_to?(current_user)
# assign them to the approval
approval.update_attributes!(user: current_user)
end
approval.approve!
flash[:success] = "You have approved #{proposal.public_identifier}."
redirect_to proposal
end
# @todo - this is acting more like an index; rename existing #index to #mine
# or similar, then rename #query to #index
def query
query_listing = self.listing
@proposals_data = query_listing.query
@text = params[:text]
@start_date = query_listing.start_date
@end_date = query_listing.end_date
end
protected
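  # Memoizes the Proposal looked up from params[:id] so repeated calls
  # within a single request do not re-query the database.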
def proposal
@cached_proposal ||= Proposal.find params[:id]
end
def auth_errors(exception)
if ['cancel','cancel_form'].include? params[:action]
redirect_to proposal_path, :alert => exception.message
else
super
end
end
def listing
Query::Proposal::Listing.new(current_user, params)
end
end
| 1 | 13,785 | :+1: Ideally this'll become something you could pass in to the config | 18F-C2 | rb |
@@ -775,7 +775,7 @@ void RaftPart::replicateLogs(folly::EventBase* eb,
return resp.get_error_code() == cpp2::ErrorCode::SUCCEEDED
&& !hosts[index]->isLearner();
})
- .then(executor_.get(), [self = shared_from_this(),
+ .via(executor_.get()).then([self = shared_from_this(),
eb,
it = std::move(iter),
currTerm, | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "kvstore/raftex/RaftPart.h"
#include <folly/io/async/EventBaseManager.h>
#include <folly/executors/IOThreadPoolExecutor.h>
#include <folly/gen/Base.h>
#include "gen-cpp2/RaftexServiceAsyncClient.h"
#include "base/CollectNSucceeded.h"
#include "thrift/ThriftClientManager.h"
#include "network/NetworkUtils.h"
#include "thread/NamedThread.h"
#include "kvstore/wal/FileBasedWal.h"
#include "kvstore/raftex/LogStrListIterator.h"
#include "kvstore/raftex/Host.h"
#include "time/WallClock.h"
DEFINE_uint32(raft_heartbeat_interval_secs, 5,
"Seconds between each heartbeat");
DEFINE_uint64(raft_snapshot_timeout, 60 * 5, "Max seconds between two snapshot requests");
DEFINE_uint32(max_batch_size, 256, "The max number of logs in a batch");
DEFINE_int32(wal_ttl, 86400, "Default wal ttl");
DEFINE_int64(wal_file_size, 128 * 1024 * 1024, "Default wal file size");
DEFINE_int32(wal_buffer_size, 8 * 1024 * 1024, "Default wal buffer size");
DEFINE_int32(wal_buffer_num, 4, "Default wal buffer number");
namespace nebula {
namespace raftex {
using nebula::network::NetworkUtils;
using nebula::thrift::ThriftClientManager;
using nebula::wal::FileBasedWal;
using nebula::wal::FileBasedWalPolicy;
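// Iterator over a batch of cached log entries that is handed to the WAL.
// Atomic-op logs are evaluated through the supplied callback before
// replication, and the iterator invalidates itself at atomic-op/command
// boundaries so the caller can resume() it for the remaining entries.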
class AppendLogsIterator final : public LogIterator {
public:
AppendLogsIterator(LogID firstLogId,
TermID termId,
RaftPart::LogCache logs,
folly::Function<std::string(AtomicOp op)> opCB)
: firstLogId_(firstLogId)
, termId_(termId)
, logId_(firstLogId)
, logs_(std::move(logs))
, opCB_(std::move(opCB)) {
leadByAtomicOp_ = processAtomicOp();
valid_ = idx_ < logs_.size();
hasNonAtomicOpLogs_ = !leadByAtomicOp_ && valid_;
if (valid_) {
currLogType_ = lastLogType_ = logType();
}
}
AppendLogsIterator(const AppendLogsIterator&) = delete;
AppendLogsIterator(AppendLogsIterator&&) = default;
AppendLogsIterator& operator=(const AppendLogsIterator&) = delete;
AppendLogsIterator& operator=(AppendLogsIterator&&) = default;
bool leadByAtomicOp() const {
return leadByAtomicOp_;
}
bool hasNonAtomicOpLogs() const {
return hasNonAtomicOpLogs_;
}
LogID firstLogId() const {
return firstLogId_;
}
    // Return true if the current log is an AtomicOp, otherwise return false
bool processAtomicOp() {
while (idx_ < logs_.size()) {
auto& tup = logs_.at(idx_);
auto logType = std::get<1>(tup);
if (logType != LogType::ATOMIC_OP) {
                // Not an AtomicOp
return false;
}
// Process AtomicOp log
CHECK(!!opCB_);
opResult_ = opCB_(std::move(std::get<3>(tup)));
if (opResult_.size() > 0) {
// AtomicOp Succeeded
return true;
} else {
// AtomicOp failed, move to the next log, but do not increment the logId_
++idx_;
}
}
// Reached the end
return false;
}
LogIterator& operator++() override {
++idx_;
++logId_;
if (idx_ < logs_.size()) {
currLogType_ = logType();
valid_ = currLogType_ != LogType::ATOMIC_OP;
if (valid_) {
hasNonAtomicOpLogs_ = true;
}
valid_ = valid_ && lastLogType_ != LogType::COMMAND;
lastLogType_ = currLogType_;
} else {
valid_ = false;
}
return *this;
}
// The iterator becomes invalid when exhausting the logs
    // **OR** running into an AtomicOp log
bool valid() const override {
return valid_;
}
LogID logId() const override {
DCHECK(valid());
return logId_;
}
TermID logTerm() const override {
return termId_;
}
ClusterID logSource() const override {
DCHECK(valid());
return std::get<0>(logs_.at(idx_));
}
folly::StringPiece logMsg() const override {
DCHECK(valid());
if (currLogType_ == LogType::ATOMIC_OP) {
return opResult_;
} else {
return std::get<2>(logs_.at(idx_));
}
}
// Return true when there is no more log left for processing
bool empty() const {
return idx_ >= logs_.size();
}
// Resume the iterator so that we can continue to process the remaining logs
void resume() {
CHECK(!valid_);
if (!empty()) {
leadByAtomicOp_ = processAtomicOp();
valid_ = idx_ < logs_.size();
hasNonAtomicOpLogs_ = !leadByAtomicOp_ && valid_;
if (valid_) {
currLogType_ = lastLogType_ = logType();
}
}
}
LogType logType() const {
return std::get<1>(logs_.at(idx_));
}
private:
size_t idx_{0};
bool leadByAtomicOp_{false};
bool hasNonAtomicOpLogs_{false};
bool valid_{true};
LogType lastLogType_{LogType::NORMAL};
LogType currLogType_{LogType::NORMAL};
std::string opResult_;
LogID firstLogId_;
TermID termId_;
LogID logId_;
RaftPart::LogCache logs_;
folly::Function<std::string(AtomicOp op)> opCB_;
};
/********************************************************
*
* Implementation of RaftPart
*
*******************************************************/
RaftPart::RaftPart(ClusterID clusterId,
GraphSpaceID spaceId,
PartitionID partId,
HostAddr localAddr,
const folly::StringPiece walRoot,
std::shared_ptr<folly::IOThreadPoolExecutor> pool,
std::shared_ptr<thread::GenericThreadPool> workers,
std::shared_ptr<folly::Executor> executor,
std::shared_ptr<SnapshotManager> snapshotMan)
: idStr_{folly::stringPrintf("[Port: %d, Space: %d, Part: %d] ",
localAddr.second, spaceId, partId)}
, clusterId_{clusterId}
, spaceId_{spaceId}
, partId_{partId}
, addr_{localAddr}
, status_{Status::STARTING}
, role_{Role::FOLLOWER}
, leader_{0, 0}
, ioThreadPool_{pool}
, bgWorkers_{workers}
, executor_(executor)
, snapshot_(snapshotMan)
, weight_(1) {
FileBasedWalPolicy policy;
policy.ttl = FLAGS_wal_ttl;
policy.fileSize = FLAGS_wal_file_size;
policy.bufferSize = FLAGS_wal_buffer_size;
policy.numBuffers = FLAGS_wal_buffer_num;
wal_ = FileBasedWal::getWal(walRoot,
idStr_,
policy,
[this] (LogID logId,
TermID logTermId,
ClusterID logClusterId,
const std::string& log) {
return this->preProcessLog(logId,
logTermId,
logClusterId,
log);
});
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
logs_.reserve(FLAGS_max_batch_size);
CHECK(!!executor_) << idStr_ << "Should not be nullptr";
}
RaftPart::~RaftPart() {
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition has stopped
CHECK(status_ == Status::STOPPED);
LOG(INFO) << idStr_ << " The part has been destroyed...";
}
const char* RaftPart::roleStr(Role role) const {
switch (role) {
case Role::LEADER:
return "Leader";
case Role::FOLLOWER:
return "Follower";
case Role::CANDIDATE:
return "Candidate";
case Role::LEARNER:
return "Learner";
default:
LOG(FATAL) << idStr_ << "Invalid role";
}
return nullptr;
}
void RaftPart::start(std::vector<HostAddr>&& peers, bool asLearner) {
std::lock_guard<std::mutex> g(raftLock_);
// Set the quorum number
quorum_ = (peers.size() + 1) / 2;
LOG(INFO) << idStr_ << "There are "
<< peers.size()
<< " peer hosts, and total "
<< peers.size() + 1
<< " copies. The quorum is " << quorum_ + 1
<< ", as learner " << asLearner;
auto logIdAndTerm = lastCommittedLogId();
committedLogId_ = logIdAndTerm.first;
term_ = proposedTerm_ = logIdAndTerm.second;
if (lastLogId_ < committedLogId_) {
lastLogId_ = committedLogId_;
lastLogTerm_ = term_;
wal_->reset();
}
// Start all peer hosts
for (auto& addr : peers) {
LOG(INFO) << idStr_ << "Add peer " << addr;
auto hostPtr = std::make_shared<Host>(addr, shared_from_this());
hosts_.emplace_back(hostPtr);
}
// Change the status
status_ = Status::RUNNING;
if (asLearner) {
role_ = Role::LEARNER;
}
startTimeMs_ = time::WallClock::fastNowInMilliSec();
// Set up a leader election task
size_t delayMS = 100 + folly::Random::rand32(900);
bgWorkers_->addDelayTask(delayMS, [self = shared_from_this()] {
self->statusPolling();
});
}
void RaftPart::stop() {
VLOG(2) << idStr_ << "Stopping the partition";
decltype(hosts_) hosts;
{
std::unique_lock<std::mutex> lck(raftLock_);
status_ = Status::STOPPED;
leader_ = {0, 0};
role_ = Role::FOLLOWER;
hosts = std::move(hosts_);
}
for (auto& h : hosts) {
h->stop();
}
VLOG(2) << idStr_ << "Invoked stop() on all peer hosts";
for (auto& h : hosts) {
VLOG(2) << idStr_ << "Waiting " << h->idStr() << " to stop";
h->waitForStop();
VLOG(2) << idStr_ << h->idStr() << "has stopped";
}
hosts.clear();
LOG(INFO) << idStr_ << "Partition has been stopped";
}
AppendLogResult RaftPart::canAppendLogs() {
CHECK(!raftLock_.try_lock());
if (status_ == Status::STARTING) {
LOG(ERROR) << idStr_ << "The partition is still starting";
return AppendLogResult::E_NOT_READY;
}
if (status_ == Status::STOPPED) {
LOG(ERROR) << idStr_ << "The partition is stopped";
return AppendLogResult::E_STOPPED;
}
if (role_ != Role::LEADER) {
LOG(ERROR) << idStr_ << "The partition is not a leader";
return AppendLogResult::E_NOT_A_LEADER;
}
return AppendLogResult::SUCCEEDED;
}
void RaftPart::addLearner(const HostAddr& addr) {
CHECK(!raftLock_.try_lock());
if (addr == addr_) {
LOG(INFO) << idStr_ << "I am learner!";
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&addr] (const auto& h) {
return h->address() == addr;
});
if (it == hosts_.end()) {
hosts_.emplace_back(std::make_shared<Host>(addr, shared_from_this(), true));
LOG(INFO) << idStr_ << "Add learner " << addr;
} else {
LOG(INFO) << idStr_ << "The host " << addr << " has been existed as "
<< ((*it)->isLearner() ? " learner " : " group member");
}
}
void RaftPart::preProcessTransLeader(const HostAddr& target) {
CHECK(!raftLock_.try_lock());
LOG(INFO) << idStr_ << "Pre process transfer leader to " << target;
switch (role_) {
case Role::FOLLOWER: {
if (target != addr_ && target != HostAddr(0, 0)) {
LOG(INFO) << idStr_ << "I am follower, just wait for the new leader.";
} else {
LOG(INFO) << idStr_ << "I will be the new leader, trigger leader election now!";
bgWorkers_->addTask([self = shared_from_this()] {
{
std::unique_lock<std::mutex> lck(self->raftLock_);
self->role_ = Role::CANDIDATE;
self->leader_ = HostAddr(0, 0);
}
self->leaderElection();
});
}
break;
}
default: {
LOG(INFO) << idStr_ << "My role is " << roleStr(role_)
<< ", so do nothing when pre process transfer leader";
break;
}
}
}
void RaftPart::commitTransLeader(const HostAddr& target) {
CHECK(!raftLock_.try_lock());
LOG(INFO) << idStr_ << "Commit transfer leader to " << target;
switch (role_) {
case Role::LEADER: {
if (target != addr_ && !hosts_.empty()) {
auto iter = std::find_if(hosts_.begin(), hosts_.end(), [] (const auto& h) {
return !h->isLearner();
});
if (iter != hosts_.end()) {
lastMsgRecvDur_.reset();
role_ = Role::FOLLOWER;
leader_ = HostAddr(0, 0);
LOG(INFO) << idStr_ << "Give up my leadership!";
}
} else {
LOG(INFO) << idStr_ << "I am already the leader!";
}
break;
}
case Role::FOLLOWER:
case Role::CANDIDATE: {
LOG(INFO) << idStr_ << "I am " << roleStr(role_) << ", just wait for the new leader!";
break;
}
case Role::LEARNER: {
LOG(INFO) << idStr_ << "I am learner, not in the raft group, skip the log";
break;
}
}
}
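// Recompute quorum_ from the current number of non-learner peers.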
void RaftPart::updateQuorum() {
CHECK(!raftLock_.try_lock());
int32_t total = 0;
for (auto& h : hosts_) {
if (!h->isLearner()) {
total++;
}
}
quorum_ = (total + 1) / 2;
}
void RaftPart::addPeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (peer == addr_) {
if (role_ == Role::LEARNER) {
LOG(INFO) << idStr_ << "I am learner, promote myself to be follower";
role_ = Role::FOLLOWER;
updateQuorum();
} else {
LOG(INFO) << idStr_ << "I am already in the raft group!";
}
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&peer] (const auto& h) {
return h->address() == peer;
});
if (it == hosts_.end()) {
hosts_.emplace_back(std::make_shared<Host>(peer, shared_from_this()));
updateQuorum();
LOG(INFO) << idStr_ << "Add peer " << peer;
} else {
if ((*it)->isLearner()) {
LOG(INFO) << idStr_ << "The host " << peer
<< " has been existed as learner, promote it!";
(*it)->setLearner(false);
updateQuorum();
} else {
LOG(INFO) << idStr_ << "The host " << peer << " has been existed as follower!";
}
}
}
void RaftPart::removePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (peer == addr_) {
// status_ = Status::STOPPED;
LOG(INFO) << idStr_ << "Remove myself from the raft group.";
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&peer] (const auto& h) {
return h->address() == peer;
});
if (it == hosts_.end()) {
LOG(INFO) << idStr_ << "The peer " << peer << " not exist!";
} else {
if ((*it)->isLearner()) {
LOG(INFO) << idStr_ << "The peer is learner, remove it directly!";
hosts_.erase(it);
return;
}
hosts_.erase(it);
updateQuorum();
LOG(INFO) << idStr_ << "Remove peer " << peer;
}
}
void RaftPart::preProcessRemovePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (role_ == Role::LEADER) {
LOG(INFO) << idStr_ << "I am leader, skip remove peer in preProcessLog";
return;
}
removePeer(peer);
}
void RaftPart::commitRemovePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (role_ == Role::FOLLOWER || role_ == Role::LEARNER) {
LOG(INFO) << idStr_ << "I am " << roleStr(role_)
<< ", skip remove peer in commit";
return;
}
CHECK(Role::LEADER == role_);
removePeer(peer);
}
folly::Future<AppendLogResult> RaftPart::appendAsync(ClusterID source,
std::string log) {
if (source < 0) {
source = clusterId_;
}
return appendLogAsync(source, LogType::NORMAL, std::move(log));
}
folly::Future<AppendLogResult> RaftPart::atomicOpAsync(AtomicOp op) {
return appendLogAsync(clusterId_, LogType::ATOMIC_OP, "", std::move(op));
}
folly::Future<AppendLogResult> RaftPart::sendCommandAsync(std::string log) {
return appendLogAsync(clusterId_, LogType::COMMAND, std::move(log));
}
folly::Future<AppendLogResult> RaftPart::appendLogAsync(ClusterID source,
LogType logType,
std::string log,
AtomicOp op) {
LogCache swappedOutLogs;
auto retFuture = folly::Future<AppendLogResult>::makeEmpty();
if (bufferOverFlow_) {
PLOG_EVERY_N(WARNING, 30) << idStr_
<< "The appendLog buffer is full."
" Please slow down the log appending rate."
<< "replicatingLogs_ :" << replicatingLogs_;
return AppendLogResult::E_BUFFER_OVERFLOW;
}
{
std::lock_guard<std::mutex> lck(logsLock_);
VLOG(2) << idStr_ << "Checking whether buffer overflow";
if (logs_.size() >= FLAGS_max_batch_size) {
// Buffer is full
LOG(WARNING) << idStr_
<< "The appendLog buffer is full."
" Please slow down the log appending rate."
<< "replicatingLogs_ :" << replicatingLogs_;
bufferOverFlow_ = true;
return AppendLogResult::E_BUFFER_OVERFLOW;
}
VLOG(2) << idStr_ << "Appending logs to the buffer";
// Append new logs to the buffer
DCHECK_GE(source, 0);
logs_.emplace_back(source, logType, std::move(log), std::move(op));
switch (logType) {
case LogType::ATOMIC_OP:
retFuture = cachingPromise_.getSingleFuture();
break;
case LogType::COMMAND:
retFuture = cachingPromise_.getAndRollSharedFuture();
break;
case LogType::NORMAL:
retFuture = cachingPromise_.getSharedFuture();
break;
}
bool expected = false;
if (replicatingLogs_.compare_exchange_strong(expected, true)) {
// We need to send logs to all followers
VLOG(2) << idStr_ << "Preparing to send AppendLog request";
sendingPromise_ = std::move(cachingPromise_);
cachingPromise_.reset();
std::swap(swappedOutLogs, logs_);
bufferOverFlow_ = false;
} else {
VLOG(2) << idStr_
<< "Another AppendLogs request is ongoing,"
" just return";
return retFuture;
}
}
LogID firstId = 0;
TermID termId = 0;
AppendLogResult res;
{
std::lock_guard<std::mutex> g(raftLock_);
res = canAppendLogs();
if (res == AppendLogResult::SUCCEEDED) {
firstId = lastLogId_ + 1;
termId = term_;
}
}
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_
<< "Cannot append logs, clean the buffer";
return res;
}
// Replicate buffered logs to all followers
// Replication will happen on a separate thread and will block
// until majority accept the logs, the leadership changes, or
// the partition stops
VLOG(2) << idStr_ << "Calling appendLogsInternal()";
AppendLogsIterator it(
firstId,
termId,
std::move(swappedOutLogs),
[this] (AtomicOp opCB) -> std::string {
CHECK(opCB != nullptr);
auto opRet = opCB();
if (opRet.empty()) {
// Failed
sendingPromise_.setOneSingleValue(AppendLogResult::E_ATOMIC_OP_FAILURE);
}
return opRet;
});
appendLogsInternal(std::move(it), termId);
return retFuture;
}
void RaftPart::appendLogsInternal(AppendLogsIterator iter, TermID termId) {
TermID currTerm = 0;
LogID prevLogId = 0;
TermID prevLogTerm = 0;
LogID committed = 0;
LogID lastId = 0;
if (iter.valid()) {
VLOG(2) << idStr_ << "Ready to append logs from id "
<< iter.logId() << " (Current term is "
<< currTerm << ")";
} else {
LOG(ERROR) << idStr_ << "Only happend when Atomic op failed";
replicatingLogs_ = false;
return;
}
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
// The partition is not running
VLOG(2) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
// Is not a leader any more
VLOG(2) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
if (term_ != termId) {
VLOG(2) << idStr_ << "Term has been updated, origin "
<< termId << ", new " << term_;
res = AppendLogResult::E_TERM_OUT_OF_DATE;
break;
}
currTerm = term_;
prevLogId = lastLogId_;
prevLogTerm = lastLogTerm_;
committed = committedLogId_;
// Step 1: Write WAL
if (!wal_->appendLogs(iter)) {
LOG(ERROR) << idStr_ << "Failed to write into WAL";
res = AppendLogResult::E_WAL_FAILURE;
break;
}
lastId = wal_->lastLogId();
VLOG(2) << idStr_ << "Succeeded writing logs ["
<< iter.firstLogId() << ", " << lastId << "] to WAL";
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "Failed append logs";
return;
}
// Step 2: Replicate to followers
auto* eb = ioThreadPool_->getEventBase();
replicateLogs(eb,
std::move(iter),
currTerm,
lastId,
committed,
prevLogTerm,
prevLogId);
return;
}
void RaftPart::replicateLogs(folly::EventBase* eb,
AppendLogsIterator iter,
TermID currTerm,
LogID lastLogId,
LogID committedId,
TermID prevLogTerm,
LogID prevLogId) {
using namespace folly; // NOLINT since the fancy overload of | operator
decltype(hosts_) hosts;
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
// The partition is not running
VLOG(2) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
// Is not a leader any more
VLOG(2) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
hosts = hosts_;
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "Replicate logs failed";
return;
}
VLOG(2) << idStr_ << "About to replicate logs to all peer hosts";
collectNSucceeded(
gen::from(hosts)
| gen::map([self = shared_from_this(),
eb,
currTerm,
lastLogId,
prevLogId,
prevLogTerm,
committedId] (std::shared_ptr<Host> hostPtr) {
VLOG(2) << self->idStr_
<< "Appending logs to "
<< hostPtr->idStr();
return via(eb, [=] () -> Future<cpp2::AppendLogResponse> {
return hostPtr->appendLogs(eb,
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId);
});
})
| gen::as<std::vector>(),
// Number of succeeded required
quorum_,
// Result evaluator
[hosts] (size_t index, cpp2::AppendLogResponse& resp) {
return resp.get_error_code() == cpp2::ErrorCode::SUCCEEDED
&& !hosts[index]->isLearner();
})
.then(executor_.get(), [self = shared_from_this(),
eb,
it = std::move(iter),
currTerm,
lastLogId,
committedId,
prevLogId,
prevLogTerm,
pHosts = std::move(hosts)] (folly::Try<AppendLogResponses>&& result) mutable {
VLOG(2) << self->idStr_ << "Received enough response";
CHECK(!result.hasException());
self->processAppendLogResponses(*result,
eb,
std::move(it),
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId,
std::move(pHosts));
return *result;
});
}
void RaftPart::processAppendLogResponses(
const AppendLogResponses& resps,
folly::EventBase* eb,
AppendLogsIterator iter,
TermID currTerm,
LogID lastLogId,
LogID committedId,
TermID prevLogTerm,
LogID prevLogId,
std::vector<std::shared_ptr<Host>> hosts) {
// Make sure majority have succeeded
size_t numSucceeded = 0;
for (auto& res : resps) {
if (!hosts[res.first]->isLearner()
&& res.second.get_error_code() == cpp2::ErrorCode::SUCCEEDED) {
++numSucceeded;
}
}
if (numSucceeded >= quorum_) {
// Majority have succeeded
VLOG(2) << idStr_ << numSucceeded
<< " hosts have accepted the logs";
LogID firstLogId = 0;
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
LOG(INFO) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
LOG(INFO) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
if (currTerm != term_) {
LOG(INFO) << idStr_ << "The leader has changed, ABA problem.";
res = AppendLogResult::E_TERM_OUT_OF_DATE;
break;
}
lastLogId_ = lastLogId;
lastLogTerm_ = currTerm;
lastMsgSentDur_.reset();
auto walIt = wal_->iterator(committedId + 1, lastLogId);
// Step 3: Commit the batch
if (commitLogs(std::move(walIt))) {
committedLogId_ = lastLogId;
firstLogId = lastLogId_ + 1;
} else {
LOG(FATAL) << idStr_ << "Failed to commit logs";
}
VLOG(2) << idStr_ << "Leader succeeded in committing the logs "
<< committedId + 1 << " to " << lastLogId;
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "processAppendLogResponses failed!";
return;
}
// Step 4: Fulfill the promise
if (iter.hasNonAtomicOpLogs()) {
sendingPromise_.setOneSharedValue(AppendLogResult::SUCCEEDED);
}
if (iter.leadByAtomicOp()) {
sendingPromise_.setOneSingleValue(AppendLogResult::SUCCEEDED);
}
// Step 5: Check whether need to continue
// the log replication
CHECK(replicatingLogs_);
// Continue to process the original AppendLogsIterator if necessary
iter.resume();
if (iter.empty()) {
std::lock_guard<std::mutex> lck(logsLock_);
VLOG(2) << idStr_ << "logs size " << logs_.size();
if (logs_.size() > 0) {
// continue to replicate the logs
sendingPromise_ = std::move(cachingPromise_);
cachingPromise_.reset();
iter = AppendLogsIterator(
firstLogId,
currTerm,
std::move(logs_),
[this] (AtomicOp op) -> std::string {
auto opRet = op();
if (opRet.empty()) {
// Failed
sendingPromise_.setOneSingleValue(
AppendLogResult::E_ATOMIC_OP_FAILURE);
}
return opRet;
});
logs_.clear();
bufferOverFlow_ = false;
} else {
replicatingLogs_ = false;
VLOG(2) << idStr_ << "No more log to be replicated";
}
}
if (!iter.empty()) {
this->appendLogsInternal(std::move(iter), currTerm);
}
} else {
// Not enough hosts accepted the log, re-try
LOG(WARNING) << idStr_ << "Only " << numSucceeded
<< " hosts succeeded, Need to try again";
replicateLogs(eb,
std::move(iter),
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId);
}
}
bool RaftPart::needToSendHeartbeat() {
std::lock_guard<std::mutex> g(raftLock_);
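    // A leader sends a heartbeat once 2/5 of the heartbeat interval has
    // elapsed since the last message it sent out.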
return status_ == Status::RUNNING &&
role_ == Role::LEADER &&
lastMsgSentDur_.elapsedInSec() >= FLAGS_raft_heartbeat_interval_secs * 2 / 5;
}
bool RaftPart::needToStartElection() {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING &&
role_ == Role::FOLLOWER &&
(lastMsgRecvDur_.elapsedInSec() >= weight_ * FLAGS_raft_heartbeat_interval_secs ||
term_ == 0)) {
LOG(INFO) << idStr_ << "Start leader election, reason: lastMsgDur "
<< lastMsgRecvDur_.elapsedInSec()
<< ", term " << term_;
role_ = Role::CANDIDATE;
leader_ = HostAddr(0, 0);
LOG(INFO) << idStr_
<< "needToStartElection: lastMsgRecvDur " << lastMsgRecvDur_.elapsedInSec()
<< ", term_ " << term_;
}
return role_ == Role::CANDIDATE;
}
bool RaftPart::prepareElectionRequest(
cpp2::AskForVoteRequest& req,
std::vector<std::shared_ptr<Host>>& hosts) {
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition is running
if (status_ != Status::RUNNING) {
VLOG(2) << idStr_ << "The partition is not running";
return false;
}
// Make sure the role is still CANDIDATE
if (role_ != Role::CANDIDATE) {
VLOG(2) << idStr_ << "A leader has been elected";
return false;
}
req.set_space(spaceId_);
req.set_part(partId_);
req.set_candidate_ip(addr_.first);
req.set_candidate_port(addr_.second);
req.set_term(++proposedTerm_); // Bump up the proposed term
req.set_last_log_id(lastLogId_);
req.set_last_log_term(lastLogTerm_);
hosts = followers();
return true;
}
typename RaftPart::Role RaftPart::processElectionResponses(
const RaftPart::ElectionResponses& results) {
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition is running
if (status_ != Status::RUNNING) {
VLOG(2) << idStr_ << "The partition is not running";
return Role::FOLLOWER;
}
if (role_ != Role::CANDIDATE) {
LOG(INFO) << idStr_ << "Partition's role has changed to "
<< roleStr(role_)
<< " during the election, so discard the results";
return role_;
}
size_t numSucceeded = 0;
for (auto& r : results) {
if (r.second.get_error_code() == cpp2::ErrorCode::SUCCEEDED) {
++numSucceeded;
}
}
CHECK(role_ == Role::CANDIDATE);
if (numSucceeded >= quorum_) {
LOG(INFO) << idStr_
<< "Partition is elected as the new leader for term "
<< proposedTerm_;
term_ = proposedTerm_;
role_ = Role::LEADER;
}
return role_;
}
bool RaftPart::leaderElection() {
VLOG(2) << idStr_ << "Start leader election...";
using namespace folly; // NOLINT since the fancy overload of | operator
cpp2::AskForVoteRequest voteReq;
decltype(hosts_) hosts;
if (!prepareElectionRequest(voteReq, hosts)) {
return false;
}
// Send out the AskForVoteRequest
LOG(INFO) << idStr_ << "Sending out an election request "
<< "(space = " << voteReq.get_space()
<< ", part = " << voteReq.get_part()
<< ", term = " << voteReq.get_term()
<< ", lastLogId = " << voteReq.get_last_log_id()
<< ", lastLogTerm = " << voteReq.get_last_log_term()
<< ", candidateIP = "
<< NetworkUtils::intToIPv4(voteReq.get_candidate_ip())
<< ", candidatePort = " << voteReq.get_candidate_port()
<< ")";
auto resps = ElectionResponses();
if (hosts.empty()) {
VLOG(2) << idStr_ << "No peer found, I will be the leader";
} else {
auto eb = ioThreadPool_->getEventBase();
auto futures = collectNSucceeded(
gen::from(hosts)
| gen::map([eb, self = shared_from_this(), &voteReq] (auto& host) {
VLOG(2) << self->idStr_
<< "Sending AskForVoteRequest to "
<< host->idStr();
return via(
eb,
[&voteReq, &host] ()
-> Future<cpp2::AskForVoteResponse> {
return host->askForVote(voteReq);
});
})
| gen::as<std::vector>(),
// Number of succeeded required
quorum_,
// Result evaluator
[hosts, this](size_t idx, cpp2::AskForVoteResponse& resp) {
if (resp.get_error_code() == cpp2::ErrorCode::E_LOG_STALE) {
LOG(INFO) << idStr_ << "My last log id is less than " << hosts[idx]
<< ", double my election interval.";
uint64_t curWeight = weight_.load();
weight_.store(curWeight * 2);
}
return resp.get_error_code() == cpp2::ErrorCode::SUCCEEDED;
});
VLOG(2) << idStr_
<< "AskForVoteRequest has been sent to all peers"
", waiting for responses";
futures.wait();
CHECK(!futures.hasException())
<< "Got exception -- "
<< futures.result().exception().what().toStdString();
VLOG(2) << idStr_ << "Got AskForVote response back";
resps = std::move(futures).get();
}
// Process the responses
switch (processElectionResponses(resps)) {
case Role::LEADER: {
// Elected
LOG(INFO) << idStr_
<< "The partition is elected as the leader";
{
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING) {
leader_ = addr_;
bgWorkers_->addTask([self = shared_from_this(),
term = voteReq.get_term()] {
self->onElected(term);
});
}
}
weight_ = 1;
sendHeartbeat();
return true;
}
case Role::FOLLOWER: {
// Someone was elected
LOG(INFO) << idStr_ << "Someone else was elected";
return true;
}
case Role::CANDIDATE: {
// No one has been elected
LOG(INFO) << idStr_
<< "No one is elected, continue the election";
return false;
}
case Role::LEARNER: {
LOG(FATAL) << idStr_ << " Impossible! There must be some bugs!";
return false;
}
}
LOG(FATAL) << "Should not reach here";
return false;
}
void RaftPart::statusPolling() {
size_t delay = FLAGS_raft_heartbeat_interval_secs * 1000 / 3;
if (needToStartElection()) {
LOG(INFO) << idStr_ << "Need to start leader election";
if (leaderElection()) {
VLOG(2) << idStr_ << "Stop the election";
} else {
// No leader has been elected, need to continue
            // (After sleeping a random period between [500ms, 2s])
VLOG(2) << idStr_ << "Wait for a while and continue the leader election";
delay = (folly::Random::rand32(1500) + 500) * weight_;
}
} else if (needToSendHeartbeat()) {
VLOG(2) << idStr_ << "Need to send heartbeat";
sendHeartbeat();
}
if (needToCleanupSnapshot()) {
LOG(INFO) << idStr_ << "Clean up the snapshot";
cleanupSnapshot();
}
if (needToCleanWal()) {
wal_->cleanWAL(FLAGS_wal_ttl);
}
{
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING || status_ == Status::WAITING_SNAPSHOT) {
VLOG(3) << idStr_ << "Schedule new task";
bgWorkers_->addDelayTask(
delay,
[self = shared_from_this()] {
self->statusPolling();
});
}
}
}
bool RaftPart::needToCleanupSnapshot() {
std::lock_guard<std::mutex> g(raftLock_);
return status_ == Status::WAITING_SNAPSHOT &&
role_ != Role::LEADER &&
lastSnapshotRecvDur_.elapsedInSec() >= FLAGS_raft_snapshot_timeout;
}
void RaftPart::cleanupSnapshot() {
LOG(INFO) << idStr_ << "Clean up the snapshot";
std::lock_guard<std::mutex> g(raftLock_);
reset();
status_ = Status::RUNNING;
}
bool RaftPart::needToCleanWal() {
std::lock_guard<std::mutex> g(raftLock_);
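    // Keep the WAL intact while a snapshot is still being sent to any peer,
    // since the peer may still need to catch up from it afterwards.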
for (auto& host : hosts_) {
if (host->sendingSnapshot_) {
return false;
}
}
return true;
}
void RaftPart::processAskForVoteRequest(
const cpp2::AskForVoteRequest& req,
cpp2::AskForVoteResponse& resp) {
LOG(INFO) << idStr_
<< "Recieved a VOTING request"
<< ": space = " << req.get_space()
<< ", partition = " << req.get_part()
<< ", candidateAddr = "
<< NetworkUtils::intToIPv4(req.get_candidate_ip()) << ":"
<< req.get_candidate_port()
<< ", term = " << req.get_term()
<< ", lastLogId = " << req.get_last_log_id()
<< ", lastLogTerm = " << req.get_last_log_term();
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition is running
if (status_ != Status::RUNNING) {
LOG(ERROR) << idStr_ << "The partition is not running";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
LOG(INFO) << idStr_ << "The partition currently is a "
<< roleStr(role_);
if (role_ == Role::LEARNER) {
resp.set_error_code(cpp2::ErrorCode::E_BAD_ROLE);
return;
}
// Check term id
auto term = role_ == Role::CANDIDATE ? proposedTerm_ : term_;
if (req.get_term() <= term) {
LOG(INFO) << idStr_
<< (role_ == Role::CANDIDATE
? "The partition is currently proposing term "
: "The partition currently is on term ")
<< term
<< ". The term proposed by the candidate is"
" no greater, so it will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
// Check the last term to receive a log
if (req.get_last_log_term() < lastLogTerm_) {
LOG(INFO) << idStr_
<< "The partition's last term to receive a log is "
<< lastLogTerm_
<< ", which is newer than the candidate's"
". So the candidate will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
if (req.get_last_log_term() == lastLogTerm_) {
// Check last log id
if (req.get_last_log_id() < lastLogId_) {
LOG(INFO) << idStr_
<< "The partition's last log id is " << lastLogId_
<< ". The candidate's last log id is smaller"
", so it will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
}
}
auto candidate = HostAddr(req.get_candidate_ip(), req.get_candidate_port());
auto hosts = followers();
auto it = std::find_if(hosts.begin(), hosts.end(), [&candidate] (const auto& h){
return h->address() == candidate;
});
if (it == hosts.end()) {
LOG(INFO) << idStr_ << "The candidate " << candidate << " is not my peers";
resp.set_error_code(cpp2::ErrorCode::E_WRONG_LEADER);
return;
}
// Ok, no reason to refuse, we will vote for the candidate
LOG(INFO) << idStr_ << "The partition will vote for the candidate";
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
Role oldRole = role_;
TermID oldTerm = term_;
role_ = Role::FOLLOWER;
term_ = proposedTerm_ = req.get_term();
leader_ = std::make_pair(req.get_candidate_ip(),
req.get_candidate_port());
// Reset the last message time
lastMsgRecvDur_.reset();
weight_ = 1;
// If the partition used to be a leader, need to fire the callback
if (oldRole == Role::LEADER) {
LOG(INFO) << idStr_ << "Was a leader, need to do some clean-up";
if (wal_->lastLogId() > lastLogId_) {
LOG(INFO) << idStr_ << "There is one log " << wal_->lastLogId()
<< " i did not commit when i was leader, rollback to " << lastLogId_;
wal_->rollbackToLog(lastLogId_);
}
// Need to invoke the onLostLeadership callback
bgWorkers_->addTask(
[self = shared_from_this(), oldTerm] {
self->onLostLeadership(oldTerm);
});
}
LOG(INFO) << idStr_ << "I was " << roleStr(oldRole)
<< ", discover the new leader " << leader_;
bgWorkers_->addTask([self = shared_from_this()] {
self->onDiscoverNewLeader(self->leader_);
});
return;
}
void RaftPart::processAppendLogRequest(
const cpp2::AppendLogRequest& req,
cpp2::AppendLogResponse& resp) {
VLOG(2) << idStr_
<< "Received logAppend "
<< ": GraphSpaceId = " << req.get_space()
<< ", partition = " << req.get_part()
<< ", current_term = " << req.get_current_term()
<< ", lastLogId = " << req.get_last_log_id()
<< ", committedLogId = " << req.get_committed_log_id()
<< ", leaderIp = " << req.get_leader_ip()
<< ", leaderPort = " << req.get_leader_port()
<< ", lastLogIdSent = " << req.get_last_log_id_sent()
<< ", lastLogTermSent = " << req.get_last_log_term_sent()
<< folly::stringPrintf(
", num_logs = %ld, logTerm = %ld",
req.get_log_str_list().size(),
req.get_log_term())
<< ", sendingSnapshot = " << req.get_sending_snapshot()
<< ", local lastLogId = " << lastLogId_
<< ", local committedLogId = " << committedLogId_;
std::lock_guard<std::mutex> g(raftLock_);
resp.set_current_term(term_);
resp.set_leader_ip(leader_.first);
resp.set_leader_port(leader_.second);
resp.set_committed_log_id(committedLogId_);
resp.set_last_log_id(lastLogId_ < committedLogId_ ? committedLogId_ : lastLogId_);
resp.set_last_log_term(lastLogTerm_);
// Check status
if (UNLIKELY(status_ == Status::STOPPED)) {
VLOG(2) << idStr_
<< "The part has been stopped, skip the request";
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
VLOG(2) << idStr_ << "The partition is still starting";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
// Check leadership
cpp2::ErrorCode err = verifyLeader(req);
if (err != cpp2::ErrorCode::SUCCEEDED) {
// Wrong leadership
VLOG(2) << idStr_ << "Will not follow the leader";
resp.set_error_code(err);
return;
}
// Reset the timeout timer
lastMsgRecvDur_.reset();
if (req.get_sending_snapshot() && status_ != Status::WAITING_SNAPSHOT) {
LOG(INFO) << idStr_ << "Begin to wait for the snapshot";
reset();
status_ = Status::WAITING_SNAPSHOT;
resp.set_error_code(cpp2::ErrorCode::E_WAITING_SNAPSHOT);
return;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
VLOG(2) << idStr_
<< "The part is receiving snapshot,"
<< "so just accept the new wals, but don't commit them."
<< "last_log_id_sent " << req.get_last_log_id_sent()
<< ", total log number " << req.get_log_str_list().size();
if (lastLogId_ > 0 && req.get_last_log_id_sent() > lastLogId_) {
// There is a gap
LOG(INFO) << idStr_ << "Local is missing logs from id "
<< lastLogId_ << ". Need to catch up";
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
}
        // TODO(heng): Suppose we have 3 nodes: one is the leader, one is waiting for a
        // snapshot (and returns success), and the other is a follower that the leader
        // failed to replicate the log to. How do we handle a leader crash? In that case,
        // no leader will be elected.
size_t numLogs = req.get_log_str_list().size();
LogID firstId = req.get_last_log_id_sent() + 1;
VLOG(2) << idStr_ << "Writing log [" << firstId
<< ", " << firstId + numLogs - 1 << "] to WAL";
LogStrListIterator iter(firstId,
req.get_log_term(),
req.get_log_str_list());
if (wal_->appendLogs(iter)) {
            // When the leader is already sending a snapshot, it sometimes sends a request
            // with an empty log list, and lastLogId in the WAL may be 0 because of the reset.
if (numLogs != 0) {
CHECK_EQ(firstId + numLogs - 1, wal_->lastLogId());
}
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
} else {
LOG(ERROR) << idStr_ << "Failed to append logs to WAL";
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
}
return;
}
if (req.get_last_log_id_sent() < committedLogId_) {
LOG(INFO) << idStr_ << "Stale log! The log " << req.get_last_log_id_sent()
<< " i had committed yet. My committedLogId is "
<< committedLogId_;
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
}
if (lastLogTerm_ > 0 && req.get_last_log_term_sent() != lastLogTerm_) {
LOG(INFO) << idStr_ << "The local last log term is " << lastLogTerm_
<< ", which is different from the leader's prevLogTerm "
<< req.get_last_log_term_sent()
<< ". So need to rollback to last committedLogId_ " << committedLogId_;
if (wal_->rollbackToLog(committedLogId_)) {
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
}
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
} else if (req.get_last_log_id_sent() > lastLogId_) {
// There is a gap
LOG(INFO) << idStr_ << "Local is missing logs from id "
<< lastLogId_ << ". Need to catch up";
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
} else if (req.get_last_log_id_sent() < lastLogId_) {
LOG(INFO) << idStr_ << "Stale log! Local lastLogId " << lastLogId_
<< ", lastLogTerm " << lastLogTerm_
<< ", lastLogIdSent " << req.get_last_log_id_sent()
<< ", lastLogTermSent " << req.get_last_log_term_sent();
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
}
// Append new logs
size_t numLogs = req.get_log_str_list().size();
LogID firstId = req.get_last_log_id_sent() + 1;
VLOG(2) << idStr_ << "Writing log [" << firstId
<< ", " << firstId + numLogs - 1 << "] to WAL";
LogStrListIterator iter(firstId,
req.get_log_term(),
req.get_log_str_list());
if (wal_->appendLogs(iter)) {
CHECK_EQ(firstId + numLogs - 1, wal_->lastLogId());
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
} else {
LOG(ERROR) << idStr_ << "Failed to append logs to WAL";
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
return;
}
if (req.get_committed_log_id() > committedLogId_) {
// Commit some logs
// We can only commit logs from firstId to min(lastLogId_, leader's commit log id),
        // a follower can't always commit up to the leader's commit id because it may lack some logs
LogID lastLogIdCanCommit = std::min(lastLogId_, req.get_committed_log_id());
CHECK(committedLogId_ + 1 <= lastLogIdCanCommit);
if (commitLogs(wal_->iterator(committedLogId_ + 1, lastLogIdCanCommit))) {
VLOG(1) << idStr_ << "Follower succeeded committing log "
<< committedLogId_ + 1 << " to "
<< lastLogIdCanCommit;
committedLogId_ = lastLogIdCanCommit;
resp.set_committed_log_id(lastLogIdCanCommit);
} else {
LOG(ERROR) << idStr_ << "Failed to commit log "
<< committedLogId_ + 1 << " to "
<< req.get_committed_log_id();
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
return;
}
}
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
}
cpp2::ErrorCode RaftPart::verifyLeader(
const cpp2::AppendLogRequest& req) {
CHECK(!raftLock_.try_lock());
auto candidate = HostAddr(req.get_leader_ip(), req.get_leader_port());
auto hosts = followers();
auto it = std::find_if(hosts.begin(), hosts.end(), [&candidate] (const auto& h){
return h->address() == candidate;
});
if (it == hosts.end()) {
VLOG(2) << idStr_ << "The candidate leader " << candidate << " is not my peers";
return cpp2::ErrorCode::E_WRONG_LEADER;
}
VLOG(2) << idStr_ << "The current role is " << roleStr(role_);
switch (role_) {
case Role::LEARNER:
case Role::FOLLOWER: {
if (req.get_current_term() == term_ &&
req.get_leader_ip() == leader_.first &&
req.get_leader_port() == leader_.second) {
VLOG(3) << idStr_ << "Same leader";
return cpp2::ErrorCode::SUCCEEDED;
}
break;
}
case Role::LEADER: {
// In this case, the remote term has to be newer
// TODO optimize the case that the current partition is
// isolated and the term keeps going up
break;
}
case Role::CANDIDATE: {
// Since the current partition is a candidate, the remote
// term has to be newer so that it can be accepted
break;
}
}
// Make sure the remote term is greater than local's
if (req.get_current_term() < term_) {
PLOG_EVERY_N(ERROR, 100) << idStr_
<< "The current role is " << roleStr(role_)
<< ". The local term is " << term_
<< ". The remote term is not newer";
return cpp2::ErrorCode::E_TERM_OUT_OF_DATE;
}
if (role_ == Role::FOLLOWER || role_ == Role::LEARNER) {
if (req.get_current_term() == term_ && leader_ != std::make_pair(0, 0)) {
LOG(ERROR) << idStr_ << "The local term is same as remote term " << term_
<< ". But I believe leader exists.";
return cpp2::ErrorCode::E_TERM_OUT_OF_DATE;
}
}
Role oldRole = role_;
TermID oldTerm = term_;
// Ok, no reason to refuse, just follow the leader
LOG(INFO) << idStr_ << "The current role is " << roleStr(role_)
<< ". Will follow the new leader "
<< network::NetworkUtils::intToIPv4(req.get_leader_ip())
<< ":" << req.get_leader_port()
<< " [Term: " << req.get_current_term() << "]";
if (role_ != Role::LEARNER) {
role_ = Role::FOLLOWER;
}
leader_ = std::make_pair(req.get_leader_ip(),
req.get_leader_port());
term_ = proposedTerm_ = req.get_current_term();
weight_ = 1;
if (oldRole == Role::LEADER) {
VLOG(2) << idStr_ << "Was a leader, need to do some clean-up";
if (wal_->lastLogId() > lastLogId_) {
LOG(INFO) << idStr_ << "There is one log " << wal_->lastLogId()
<< " i did not commit when i was leader, rollback to " << lastLogId_;
wal_->rollbackToLog(lastLogId_);
}
// Need to invoke onLostLeadership callback
bgWorkers_->addTask([self = shared_from_this(), oldTerm] {
self->onLostLeadership(oldTerm);
});
}
bgWorkers_->addTask([self = shared_from_this()] {
self->onDiscoverNewLeader(self->leader_);
});
return cpp2::ErrorCode::SUCCEEDED;
}
void RaftPart::processSendSnapshotRequest(const cpp2::SendSnapshotRequest& req,
cpp2::SendSnapshotResponse& resp) {
VLOG(1) << idStr_ << "Receive snapshot, total rows " << req.get_rows().size()
<< ", total count received " << req.get_total_count()
<< ", total size received " << req.get_total_size()
<< ", finished " << req.get_done();
std::lock_guard<std::mutex> g(raftLock_);
// Check status
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(ERROR) << idStr_
<< "The part has been stopped, skip the request";
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(ERROR) << idStr_ << "The partition is still starting";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
if (UNLIKELY(role_ != Role::FOLLOWER && role_ != Role::LEARNER)) {
LOG(ERROR) << idStr_ << "Bad role " << roleStr(role_);
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(leader_ != HostAddr(req.get_leader_ip(), req.get_leader_port())
|| term_ != req.get_term())) {
LOG(ERROR) << idStr_ << "Term out of date, current term " << term_
<< ", received term " << req.get_term();
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
if (status_ != Status::WAITING_SNAPSHOT) {
LOG(INFO) << idStr_ << "Begin to receive the snapshot";
reset();
status_ = Status::WAITING_SNAPSHOT;
}
lastSnapshotRecvDur_.reset();
// TODO(heng): Maybe we should save them into one sst firstly?
auto ret = commitSnapshot(req.get_rows(),
req.get_committed_log_id(),
req.get_committed_log_term(),
req.get_done());
lastTotalCount_ += ret.first;
lastTotalSize_ += ret.second;
if (lastTotalCount_ != req.get_total_count()
|| lastTotalSize_ != req.get_total_size()) {
LOG(ERROR) << idStr_ << "Bad snapshot, total rows received " << lastTotalCount_
<< ", total rows sended " << req.get_total_count()
<< ", total size received " << lastTotalSize_
<< ", total size sended " << req.get_total_size();
resp.set_error_code(cpp2::ErrorCode::E_PERSIST_SNAPSHOT_FAILED);
return;
}
if (req.get_done()) {
committedLogId_ = req.get_committed_log_id();
if (lastLogId_ < committedLogId_) {
lastLogId_ = committedLogId_;
lastLogTerm_ = req.get_committed_log_term();
}
if (wal_->lastLogId() <= committedLogId_) {
LOG(INFO) << "Reset invalid wal after snapshot received";
wal_->reset();
}
status_ = Status::RUNNING;
LOG(INFO) << idStr_ << "Receive all snapshot, committedLogId_ " << committedLogId_
<< ", lastLodId " << lastLogId_ << ", lastLogTermId " << lastLogTerm_;
}
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
return;
}
folly::Future<AppendLogResult> RaftPart::sendHeartbeat() {
VLOG(2) << idStr_ << "Send heartbeat";
std::string log = "";
return appendLogAsync(clusterId_, LogType::NORMAL, std::move(log));
}
std::vector<std::shared_ptr<Host>> RaftPart::followers() const {
CHECK(!raftLock_.try_lock());
decltype(hosts_) hosts;
for (auto& h : hosts_) {
if (!h->isLearner()) {
hosts.emplace_back(h);
}
}
return hosts;
}
bool RaftPart::checkAppendLogResult(AppendLogResult res) {
if (res != AppendLogResult::SUCCEEDED) {
{
std::lock_guard<std::mutex> lck(logsLock_);
logs_.clear();
cachingPromise_.setValue(res);
cachingPromise_.reset();
bufferOverFlow_ = false;
}
sendingPromise_.setValue(res);
replicatingLogs_ = false;
        return false;
}
return true;
}
void RaftPart::reset() {
CHECK(!raftLock_.try_lock());
wal_->reset();
cleanup();
lastLogId_ = committedLogId_ = 0;
lastTotalCount_ = 0;
lastTotalSize_ = 0;
}
AppendLogResult RaftPart::isCatchedUp(const HostAddr& peer) {
std::lock_guard<std::mutex> lck(logsLock_);
if (role_ != Role::LEADER) {
LOG(INFO) << idStr_ << "I am not the leader";
return AppendLogResult::E_NOT_A_LEADER;
}
if (peer == addr_) {
LOG(INFO) << idStr_ << "I am the leader";
return AppendLogResult::SUCCEEDED;
}
for (auto& host : hosts_) {
if (host->addr_ == peer) {
return host->sendingSnapshot_ ? AppendLogResult::E_SENDING_SNAPSHOT
: AppendLogResult::SUCCEEDED;
}
}
return AppendLogResult::E_INVALID_PEER;
}
} // namespace raftex
} // namespace nebula
| 1 | 23,690 | Should using `thenValue` to replace `then` ? | vesoft-inc-nebula | cpp |
@@ -130,6 +130,12 @@ namespace Datadog.Trace.ClrProfiler.Integrations
private void OnError(object sender, EventArgs eventArgs)
{
+ if (!Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationName))
+ {
+ // integration disabled
+ return;
+ }
+
try
{
if (!TryGetContext(sender, out var httpContext) || httpContext.Error == null || | 1 | #if !NETSTANDARD2_0
using System;
using System.Web;
using Datadog.Trace.ClrProfiler.Interfaces;
using Datadog.Trace.ClrProfiler.Models;
using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.Logging;
namespace Datadog.Trace.ClrProfiler.Integrations
{
/// <summary>
/// IHttpModule used to trace within an ASP.NET HttpApplication request
/// </summary>
public class AspNetHttpModule : IHttpModule
{
internal const string IntegrationName = "AspNet";
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(AspNetHttpModule));
private readonly string _httpContextDelegateKey;
private readonly string _operationName;
/// <summary>
/// Initializes a new instance of the <see cref="AspNetHttpModule" /> class.
/// </summary>
public AspNetHttpModule()
: this("aspnet.request")
{
}
/// <summary>
/// Initializes a new instance of the <see cref="AspNetHttpModule" /> class.
/// </summary>
/// <param name="operationName">The operation name to be used for the trace/span data generated</param>
public AspNetHttpModule(string operationName)
{
_operationName = operationName ?? throw new ArgumentNullException(nameof(operationName));
_httpContextDelegateKey = string.Concat("__Datadog.Trace.ClrProfiler.Integrations.AspNetHttpModule-", _operationName);
}
/// <inheritdoc />
public void Init(HttpApplication httpApplication)
{
httpApplication.BeginRequest += OnBeginRequest;
httpApplication.EndRequest += OnEndRequest;
httpApplication.Error += OnError;
}
/// <inheritdoc />
public void Dispose()
{
// Nothing to do...
}
private void OnBeginRequest(object sender, EventArgs eventArgs)
{
var tracer = Tracer.Instance;
if (!tracer.Settings.IsIntegrationEnabled(IntegrationName))
{
// integration disabled
return;
}
Scope scope = null;
try
{
if (!TryGetContext(sender, out var httpContext))
{
return;
}
SpanContext propagatedContext = null;
if (tracer.ActiveScope == null)
{
try
{
// extract propagated http headers
var headers = httpContext.Request.Headers.Wrap();
propagatedContext = SpanContextPropagator.Instance.Extract(headers);
}
catch (Exception ex)
{
Log.Error(ex, "Error extracting propagated HTTP headers.");
}
}
scope = tracer.StartActive(_operationName, propagatedContext);
// set analytics sample rate if enabled
var analyticsSampleRate = tracer.Settings.GetIntegrationAnalyticsSampleRate(IntegrationName, enabledWithGlobalSetting: true);
scope.Span.SetMetric(Tags.Analytics, analyticsSampleRate);
httpContext.Items[_httpContextDelegateKey] = HttpContextSpanIntegrationDelegate.CreateAndBegin(httpContext, scope);
}
catch (Exception ex)
{
// Dispose here, as the scope won't be in context items and won't get disposed on request end in that case...
scope?.Dispose();
Log.Error(ex, "Datadog ASP.NET HttpModule instrumentation error");
}
}
private void OnEndRequest(object sender, EventArgs eventArgs)
{
if (!Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationName))
{
// integration disabled
return;
}
try
{
if (!TryGetContext(sender, out var httpContext) ||
!httpContext.Items.TryGetValue<ISpanIntegrationDelegate>(_httpContextDelegateKey, out var integrationDelegate))
{
return;
}
integrationDelegate.OnEnd();
}
catch (Exception ex)
{
Log.Error(ex, "Datadog ASP.NET HttpModule instrumentation error");
}
}
private void OnError(object sender, EventArgs eventArgs)
{
try
{
if (!TryGetContext(sender, out var httpContext) || httpContext.Error == null ||
!httpContext.Items.TryGetValue<ISpanIntegrationDelegate>(_httpContextDelegateKey, out var integrationDelegate))
{
return;
}
integrationDelegate.OnError();
}
catch (Exception ex)
{
Log.Error(ex, "Datadog ASP.NET HttpModule instrumentation error");
}
}
private bool TryGetContext(object sender, out HttpContext httpContext)
{
if (sender == null || !(sender is HttpApplication httpApp) || httpApp?.Context?.Items == null)
{
httpContext = null;
return false;
}
httpContext = httpApp.Context;
return true;
}
}
}
#endif
| 1 | 16,095 | This follows the convention of each callback starting with the `IsIntegrationEnabled` check. | DataDog-dd-trace-dotnet | .cs |
@@ -26,11 +26,15 @@ namespace OpenTelemetry.Metrics
private long sumLong = 0;
private SumMetricLong sumMetricLong;
private DateTimeOffset startTimeExclusive;
+ private bool isDeltaValue;
+ private bool isMonotonicValue;
- internal SumMetricAggregatorLong(string name, string description, string unit, Meter meter, DateTimeOffset startTimeExclusive, KeyValuePair<string, object>[] attributes)
+ internal SumMetricAggregatorLong(string name, string description, string unit, Meter meter, DateTimeOffset startTimeExclusive, KeyValuePair<string, object>[] attributes, bool isDeltaValue, bool isMonotonicValue)
{
this.startTimeExclusive = startTimeExclusive;
this.sumMetricLong = new SumMetricLong(name, description, unit, meter, startTimeExclusive, attributes);
+ this.isDeltaValue = isDeltaValue;
+ this.isMonotonicValue = isMonotonicValue;
}
public void Update<T>(T value) | 1 | // <copyright file="SumMetricAggregatorLong.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics.Metrics;
namespace OpenTelemetry.Metrics
{
internal class SumMetricAggregatorLong : IAggregator
{
private readonly object lockUpdate = new object();
private long sumLong = 0;
private SumMetricLong sumMetricLong;
private DateTimeOffset startTimeExclusive;
internal SumMetricAggregatorLong(string name, string description, string unit, Meter meter, DateTimeOffset startTimeExclusive, KeyValuePair<string, object>[] attributes)
{
this.startTimeExclusive = startTimeExclusive;
this.sumMetricLong = new SumMetricLong(name, description, unit, meter, startTimeExclusive, attributes);
}
public void Update<T>(T value)
where T : struct
{
// TODO: Replace Lock with Interlocked.Add
lock (this.lockUpdate)
{
if (typeof(T) == typeof(long))
{
// TODO: Confirm this doesn't cause boxing.
var val = (long)(object)value;
if (val < 0)
{
// TODO: log?
// Also, this validation can be done in earlier stage.
}
else
{
this.sumLong += val;
}
}
else
{
throw new Exception("Unsupported Type");
}
}
}
public IMetric Collect(DateTimeOffset dt, bool isDelta)
{
lock (this.lockUpdate)
{
this.sumMetricLong.StartTimeExclusive = this.startTimeExclusive;
this.sumMetricLong.EndTimeInclusive = dt;
this.sumMetricLong.LongSum = this.sumLong;
this.sumMetricLong.IsDeltaTemporality = isDelta;
if (isDelta)
{
this.startTimeExclusive = dt;
this.sumLong = 0;
}
}
// TODO: Confirm that this approach of
// re-using the same instance is correct.
// This avoids allocating a new instance.
// It is read only for Exporters,
// and also there is no parallel
// Collect allowed.
return this.sumMetricLong;
}
public string ToDisplayString()
{
return $"Sum={this.sumLong}";
}
}
}
| 1 | 20,968 | this is not required to be part of this PR right? (With UpDownCounter being absent in .NET, we can make this hardcoded for now, i think) | open-telemetry-opentelemetry-dotnet | .cs |
@@ -0,0 +1,12 @@
+// Copyright (c) .NET Foundation. All rights reserved.
+// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
+
+using System.IO.Pipelines;
+
+namespace Microsoft.AspNetCore.Server.Kestrel.Transport
+{
+ public interface IConnectionHandler
+ {
+ IConnectionContext OnConnection(IConnectionInformation connectionInfo, IScheduler inputWriterScheduler, IScheduler outputReaderScheduler);
+ }
+} | 1 | 1 | 12,300 | Add the PipeFactory here | aspnet-KestrelHttpServer | .cs |
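
// A minimal sketch, not part of the original change: the review comment on this
// interface asks to "Add the PipeFactory here". One possible reading — an
// assumption about intent, not the actual Kestrel API — is to hand the handler
// the PipeFactory used to allocate the connection's input/output pipes:
//
//   IConnectionContext OnConnection(
//       IConnectionInformation connectionInfo,
//       PipeFactory pipeFactory,
//       IScheduler inputWriterScheduler,
//       IScheduler outputReaderScheduler);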
|
@@ -23,15 +23,12 @@ import (
"github.com/pkg/errors"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
- "github.com/chaos-mesh/chaos-mesh/controllers/common"
)
type Delegate struct {
impl interface{}
}
-var _ common.ChaosImpl = (*Delegate)(nil)
-
func (i *Delegate) callAccordingToAction(action, methodName string, defaultPhase v1alpha1.Phase, args ...interface{}) (v1alpha1.Phase, error) {
implType := reflect.TypeOf(i.impl).Elem()
implVal := reflect.ValueOf(i.impl) | 1 | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package action
import (
"context"
"reflect"
"strings"
"github.com/pkg/errors"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/controllers/common"
)
type Delegate struct {
impl interface{}
}
var _ common.ChaosImpl = (*Delegate)(nil)
func (i *Delegate) callAccordingToAction(action, methodName string, defaultPhase v1alpha1.Phase, args ...interface{}) (v1alpha1.Phase, error) {
implType := reflect.TypeOf(i.impl).Elem()
implVal := reflect.ValueOf(i.impl)
reflectArgs := []reflect.Value{}
for _, arg := range args {
reflectArgs = append(reflectArgs, reflect.ValueOf(arg))
}
for i := 0; i < implType.NumField(); i++ {
field := implType.Field(i)
actions := strings.Split(field.Tag.Get("action"), ",")
for i := range actions {
if actions[i] == action {
rets := implVal.Elem().FieldByIndex(field.Index).MethodByName(methodName).Call(reflectArgs)
// nil.(error) will panic :(
err := rets[1].Interface()
if err == nil {
return rets[0].Interface().(v1alpha1.Phase), nil
}
return rets[0].Interface().(v1alpha1.Phase), err.(error)
}
}
}
return defaultPhase, errors.Errorf("unknown action %s", action)
}
func (i *Delegate) getAction(obj v1alpha1.InnerObject) string {
return reflect.ValueOf(obj).Elem().FieldByName("Spec").FieldByName("Action").String()
}
func (i *Delegate) Apply(ctx context.Context, index int, records []*v1alpha1.Record, obj v1alpha1.InnerObject) (v1alpha1.Phase, error) {
return i.callAccordingToAction(i.getAction(obj), "Apply", v1alpha1.NotInjected, ctx, index, records, obj)
}
func (i *Delegate) Recover(ctx context.Context, index int, records []*v1alpha1.Record, obj v1alpha1.InnerObject) (v1alpha1.Phase, error) {
return i.callAccordingToAction(i.getAction(obj), "Recover", v1alpha1.Injected, ctx, index, records, obj)
}
func New(impl interface{}) Delegate {
return Delegate{
impl,
}
}
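
// A minimal sketch, using hypothetical types, of how an implementation plugs
// into Delegate: each field of the wrapper struct is tagged with the action
// names it handles, and callAccordingToAction dispatches to the field whose
// "action" tag matches the spec's Action value. The example* names below are
// invented for illustration and are not part of the repository.
type exampleDelayImpl struct{}

func (exampleDelayImpl) Apply(ctx context.Context, index int, records []*v1alpha1.Record, obj v1alpha1.InnerObject) (v1alpha1.Phase, error) {
	// inject the fault here in a real implementation
	return v1alpha1.Injected, nil
}

func (exampleDelayImpl) Recover(ctx context.Context, index int, records []*v1alpha1.Record, obj v1alpha1.InnerObject) (v1alpha1.Phase, error) {
	// undo the fault here in a real implementation
	return v1alpha1.NotInjected, nil
}

type exampleChaosImpl struct {
	Delay exampleDelayImpl `action:"delay"`
}

// Usage would be delegate := New(&exampleChaosImpl{}). The review on this change
// also suggests keeping a compile-time assertion along the lines of
// var _ impltypes.ChaosImpl = (*Delegate)(nil), with the package path the
// reviewer names.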
| 1 | 25,175 | please add `var _ impltypes.ChaosImpl = (*Delegate)(nil)` | chaos-mesh-chaos-mesh | go |
@@ -97,4 +97,15 @@ public interface ExpireSnapshots extends PendingUpdate<List<Snapshot>> {
* @return this for method chaining
*/
ExpireSnapshots executeWith(ExecutorService executorService);
+
+ /**
+ * Allows expiration of snapshots without any cleanup of underlying manifest or data files.
+ * <p>
+ * Allows control in removing data and manifest files which may be more efficiently removed using
+ * a distributed framework through the actions API.
+ *
+ * @param clean setting this to false will skip deleting expired manifests and files
+ * @return this for method chaining
+ */
+ ExpireSnapshots cleanExpiredFiles(boolean clean);
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.function.Consumer;
/**
* API for removing old {@link Snapshot snapshots} from a table.
* <p>
* This API accumulates snapshot deletions and commits the new list to the table. This API does not
* allow deleting the current snapshot.
* <p>
* When committing, these changes will be applied to the latest table metadata. Commit conflicts
* will be resolved by applying the changes to the new latest metadata and reattempting the commit.
* <p>
* Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
* deleted by snapshots that are expired will be deleted. {@link #deleteWith(Consumer)} can be used
* to pass an alternative deletion method.
*
* {@link #apply()} returns a list of the snapshots that will be removed.
*/
public interface ExpireSnapshots extends PendingUpdate<List<Snapshot>> {
/**
* Expires a specific {@link Snapshot} identified by id.
*
* @param snapshotId long id of the snapshot to expire
* @return this for method chaining
*/
ExpireSnapshots expireSnapshotId(long snapshotId);
/**
* Expires all snapshots older than the given timestamp.
*
* @param timestampMillis a long timestamp, as returned by {@link System#currentTimeMillis()}
* @return this for method chaining
*/
ExpireSnapshots expireOlderThan(long timestampMillis);
/**
* Retains the most recent ancestors of the current snapshot.
* <p>
* If a snapshot would be expired because it is older than the expiration timestamp, but is one of
* the {@code numSnapshot} most recent ancestors of the current state, it will be retained. This
* will not cause snapshots explicitly identified by id from expiring.
* <p>
* This may keep more than {@code numSnapshot} ancestors if snapshots are added concurrently. This
 * may keep fewer than {@code numSnapshot} ancestors if the current table state does not have that many.
*
* @param numSnapshots the number of snapshots to retain
* @return this for method chaining
*/
ExpireSnapshots retainLast(int numSnapshots);
/**
* Passes an alternative delete implementation that will be used for manifests and data files.
* <p>
* Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
* deleted by snapshots that are expired will be deleted.
* <p>
* If this method is not called, unnecessary manifests and data files will still be deleted.
*
* @param deleteFunc a function that will be called to delete manifests and data files
* @return this for method chaining
*/
ExpireSnapshots deleteWith(Consumer<String> deleteFunc);
/**
* Passes an alternative executor service that will be used for manifests and data files deletion.
* <p>
* Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
* deleted by snapshots that are expired will be deleted.
* <p>
* If this method is not called, unnecessary manifests and data files will still be deleted using a single threaded
* executor service.
*
* @param executorService an executor service to parallelize tasks to delete manifests and data files
* @return this for method chaining
*/
ExpireSnapshots executeWith(ExecutorService executorService);
}
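
// A minimal usage sketch, assuming `table` is an org.apache.iceberg.Table handle;
// the chained calls show how the builder-style API above is typically used, and
// cleanExpiredFiles(false) refers to the method proposed in the patch for this
// file, deferring file deletion to an external (e.g. distributed) cleanup action:
//
//   table.expireSnapshots()
//       .expireOlderThan(System.currentTimeMillis() - TimeUnit.DAYS.toMillis(7))
//       .retainLast(5)
//       .executeWith(Executors.newFixedThreadPool(4))
//       .cleanExpiredFiles(false)
//       .commit();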
| 1 | 22,165 | nit: is this added deliberately? | apache-iceberg | java |
@@ -8,6 +8,8 @@ class SiteControllerTest < ActionController::TestCase
def setup
Object.const_set("ID_KEY", client_applications(:oauth_web_app).key)
Object.const_set("POTLATCH2_KEY", client_applications(:oauth_web_app).key)
+
+ stub_request(:get, "http://api.hostip.info/country.php?ip=0.0.0.0")
end
## | 1 | require "test_helper"
class SiteControllerTest < ActionController::TestCase
api_fixtures
##
# setup oauth keys
def setup
Object.const_set("ID_KEY", client_applications(:oauth_web_app).key)
Object.const_set("POTLATCH2_KEY", client_applications(:oauth_web_app).key)
end
##
# clear oauth keys
def teardown
Object.send("remove_const", "ID_KEY")
Object.send("remove_const", "POTLATCH2_KEY")
end
##
# test all routes which lead to this controller
def test_routes
assert_routing(
{ :path => "/", :method => :get },
{ :controller => "site", :action => "index" }
)
assert_routing(
{ :path => "/", :method => :post },
{ :controller => "site", :action => "index" }
)
assert_routing(
{ :path => "/edit", :method => :get },
{ :controller => "site", :action => "edit" }
)
assert_recognizes(
{ :controller => "site", :action => "edit", :format => "html" },
{ :path => "/edit.html", :method => :get }
)
assert_routing(
{ :path => "/copyright", :method => :get },
{ :controller => "site", :action => "copyright" }
)
assert_routing(
{ :path => "/copyright/locale", :method => :get },
{ :controller => "site", :action => "copyright", :copyright_locale => "locale" }
)
assert_routing(
{ :path => "/welcome", :method => :get },
{ :controller => "site", :action => "welcome" }
)
assert_routing(
{ :path => "/fixthemap", :method => :get },
{ :controller => "site", :action => "fixthemap" }
)
assert_routing(
{ :path => "/export", :method => :get },
{ :controller => "site", :action => "export" }
)
assert_recognizes(
{ :controller => "site", :action => "export", :format => "html" },
{ :path => "/export.html", :method => :get }
)
assert_routing(
{ :path => "/offline", :method => :get },
{ :controller => "site", :action => "offline" }
)
assert_routing(
{ :path => "/key", :method => :get },
{ :controller => "site", :action => "key" }
)
assert_routing(
{ :path => "/go/shortcode", :method => :get },
{ :controller => "site", :action => "permalink", :code => "shortcode" }
)
assert_routing(
{ :path => "/preview/formatname", :method => :post },
{ :controller => "site", :action => "preview", :format => "formatname" }
)
assert_routing(
{ :path => "/id", :method => :get },
{ :controller => "site", :action => "id" }
)
end
# Test the index page
def test_index
get :index
assert_response :success
assert_template "index"
end
# Test the index page redirects
def test_index_redirect
get :index, :node => 123
assert_redirected_to :controller => :browse, :action => :node, :id => 123
get :index, :way => 123
assert_redirected_to :controller => :browse, :action => :way, :id => 123
get :index, :relation => 123
assert_redirected_to :controller => :browse, :action => :relation, :id => 123
get :index, :note => 123
assert_redirected_to :controller => :browse, :action => :note, :id => 123
get :index, :query => "test"
assert_redirected_to :controller => :geocoder, :action => :search, :query => "test"
get :index, :lat => 4, :lon => 5
assert_redirected_to :controller => :site, :action => :index, :anchor => "map=5/4/5"
get :index, :lat => 4, :lon => 5, :zoom => 3
assert_redirected_to :controller => :site, :action => :index, :anchor => "map=3/4/5"
get :index, :layers => "T"
assert_redirected_to :controller => :site, :action => :index, :anchor => "layers=T"
get :index, :notes => "yes"
assert_redirected_to :controller => :site, :action => :index, :anchor => "layers=N"
get :index, :lat => 4, :lon => 5, :zoom => 3, :layers => "T"
assert_redirected_to :controller => :site, :action => :index, :anchor => "map=3/4/5&layers=T"
end
# Test the permalink redirect
def test_permalink
get :permalink, :code => "wBz3--"
assert_response :redirect
assert_redirected_to :controller => :site, :action => :index, :anchor => "map=3/4.8779296875/3.955078125"
get :permalink, :code => "wBz3--", :m => ""
assert_response :redirect
assert_redirected_to :controller => :site, :action => :index, :mlat => "4.8779296875", :mlon => "3.955078125", :anchor => "map=3/4.8779296875/3.955078125"
get :permalink, :code => "wBz3--", :layers => "T"
assert_response :redirect
assert_redirected_to :controller => :site, :action => :index, :anchor => "map=3/4.8779296875/3.955078125&layers=T"
get :permalink, :code => "wBz3--", :node => 1
assert_response :redirect
assert_redirected_to :controller => :browse, :action => :node, :id => 1, :anchor => "map=3/4.8779296875/3.955078125"
get :permalink, :code => "wBz3--", :way => 2
assert_response :redirect
assert_redirected_to :controller => :browse, :action => :way, :id => 2, :anchor => "map=3/4.8779296875/3.955078125"
get :permalink, :code => "wBz3--", :relation => 3
assert_response :redirect
assert_redirected_to :controller => :browse, :action => :relation, :id => 3, :anchor => "map=3/4.8779296875/3.955078125"
get :permalink, :code => "wBz3--", :changeset => 4
assert_response :redirect
assert_redirected_to :controller => :browse, :action => :changeset, :id => 4, :anchor => "map=3/4.8779296875/3.955078125"
end
# Test the key page
def test_key
xhr :get, :key
assert_response :success
assert_template "key"
assert_template :layout => false
end
# Test the edit page redirects when you aren't logged in
def test_edit
get :edit
assert_response :redirect
assert_redirected_to :controller => :user, :action => :login, :referer => "/edit"
end
# Test the right editor gets used when the user hasn't set a preference
def test_edit_without_preference
get :edit, nil, :user => users(:public_user).id
assert_response :success
assert_template "edit"
assert_template :partial => "_#{DEFAULT_EDITOR}", :count => 1
end
# Test the right editor gets used when the user has set a preference
def test_edit_with_preference
user = users(:public_user)
user.preferred_editor = "id"
user.save!
get :edit, nil, :user => user.id
assert_response :success
assert_template "edit"
assert_template :partial => "_id", :count => 1
user = users(:public_user)
user.preferred_editor = "potlatch2"
user.save!
get :edit, nil, :user => user.id
assert_response :success
assert_template "edit"
assert_template :partial => "_potlatch2", :count => 1
user = users(:public_user)
user.preferred_editor = "potlatch"
user.save!
get :edit, nil, :user => user.id
assert_response :success
assert_template "edit"
assert_template :partial => "_potlatch", :count => 1
user = users(:public_user)
user.preferred_editor = "remote"
user.save!
get :edit, nil, :user => user.id
assert_response :success
assert_template "index"
end
# Test the right editor gets used when the URL has an override
def test_edit_with_override
get :edit, { :editor => "id" }, { :user => users(:public_user).id }
assert_response :success
assert_template "edit"
assert_template :partial => "_id", :count => 1
get :edit, { :editor => "potlatch2" }, { :user => users(:public_user).id }
assert_response :success
assert_template "edit"
assert_template :partial => "_potlatch2", :count => 1
get :edit, { :editor => "potlatch" }, { :user => users(:public_user).id }
assert_response :success
assert_template "edit"
assert_template :partial => "_potlatch", :count => 1
get :edit, { :editor => "remote" }, { :user => users(:public_user).id }
assert_response :success
assert_template "index"
end
# Test editing a specific node
def test_edit_with_node
user = users(:public_user)
node = current_nodes(:visible_node)
get :edit, { :node => node.id }, { :user => user.id }
assert_response :success
assert_template "edit"
assert_equal 1.0, assigns(:lat)
assert_equal 1.0, assigns(:lon)
assert_equal 18, assigns(:zoom)
end
# Test editing a specific way
def test_edit_with_way
user = users(:public_user)
way = current_ways(:visible_way)
get :edit, { :way => way.id }, { :user => user.id }
assert_response :success
assert_template "edit"
assert_equal 3.0, assigns(:lat)
assert_equal 3.0, assigns(:lon)
assert_equal 17, assigns(:zoom)
end
# Test editing a specific note
def test_edit_with_note
user = users(:public_user)
note = create(:note) do |n|
n.comments.create(:author_id => user.id)
end
get :edit, { :note => note.id }, { :user => user.id }
assert_response :success
assert_template "edit"
assert_equal 1.0, assigns(:lat)
assert_equal 1.0, assigns(:lon)
assert_equal 17, assigns(:zoom)
end
# Test editing a specific GPX trace
def test_edit_with_gpx
user = users(:public_user)
gpx = gpx_files(:public_trace_file)
get :edit, { :gpx => gpx.id }, { :user => user.id }
assert_response :success
assert_template "edit"
assert_equal 1.0, assigns(:lat)
assert_equal 1.0, assigns(:lon)
assert_equal 16, assigns(:zoom)
end
# Test the edit page redirects
def test_edit_redirect
get :edit, :lat => 4, :lon => 5
assert_redirected_to :controller => :site, :action => :edit, :anchor => "map=5/4/5"
get :edit, :lat => 4, :lon => 5, :zoom => 3
assert_redirected_to :controller => :site, :action => :edit, :anchor => "map=3/4/5"
get :edit, :lat => 4, :lon => 5, :zoom => 3, :editor => "id"
assert_redirected_to :controller => :site, :action => :edit, :editor => "id", :anchor => "map=3/4/5"
end
# Test the copyright page
def test_copyright
get :copyright
assert_response :success
assert_template "copyright"
end
# Test the welcome page
def test_welcome
get :welcome
assert_response :redirect
assert_redirected_to :controller => :user, :action => :login, :referer => "/welcome"
get :welcome, nil, :user => users(:public_user).id
assert_response :success
assert_template "welcome"
end
# Test the fixthemap page
def test_fixthemap
get :fixthemap
assert_response :success
assert_template "fixthemap"
end
# Test the help page
def test_help
get :help
assert_response :success
assert_template "help"
end
# Test the about page
def test_about
get :about
assert_response :success
assert_template "about"
end
# Test the export page
def test_export
get :export
assert_response :success
assert_template "export"
assert_template :layout => "map"
xhr :get, :export
assert_response :success
assert_template "export"
assert_template :layout => "xhr"
end
# Test the offline page
def test_offline
get :offline
assert_response :success
assert_template "offline"
end
# Test the rich text preview
def test_preview
xhr :post, :preview, :format => "html"
assert_response :success
xhr :post, :preview, :format => "markdown"
assert_response :success
xhr :post, :preview, :format => "text"
assert_response :success
end
# Test the id frame
def test_id
get :id, nil, :user => users(:public_user).id
assert_response :success
assert_template "id"
assert_template :layout => false
end
end
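
# A minimal sketch of the alternative raised in the review: installing the hostip
# stub once for every test rather than per test case. Assuming webmock is already
# required, a shared setup hook in test/test_helper.rb could look like:
#
#   module ActiveSupport
#     class TestCase
#       setup do
#         stub_request(:get, %r{api\.hostip\.info/country\.php})
#       end
#     end
#   end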
| 1 | 10,254 | Given the number of tests which need this (I think I counted seven) should we maybe just install this one globally? Is there even a place to do that? Something in `test_helper` maybe? | openstreetmap-openstreetmap-website | rb |
@@ -495,6 +495,16 @@ function applyWriteConcern(target, sources, options) {
return target;
}
+/**
+ * Checks if a given value is a Promise
+ *
+ * @param {*} maybePromise
+ * @return true if the provided value is a Promise
+ */
+function isPromiseLike(maybePromise) {
+ return maybePromise && typeof maybePromise.then === 'function';
+}
+
exports.filterOptions = filterOptions;
exports.mergeOptions = mergeOptions;
exports.translateOptions = translateOptions; | 1 | 'use strict';
const MongoError = require('mongodb-core').MongoError;
const ReadPreference = require('mongodb-core').ReadPreference;
var shallowClone = function(obj) {
var copy = {};
for (var name in obj) copy[name] = obj[name];
return copy;
};
// Figure out the read preference
var translateReadPreference = function(options) {
var r = null;
if (options.readPreference) {
r = options.readPreference;
} else {
return options;
}
if (typeof r === 'string') {
options.readPreference = new ReadPreference(r);
} else if (r && !(r instanceof ReadPreference) && typeof r === 'object') {
const mode = r.mode || r.preference;
if (mode && typeof mode === 'string') {
options.readPreference = new ReadPreference(mode, r.tags, {
maxStalenessSeconds: r.maxStalenessSeconds
});
}
} else if (!(r instanceof ReadPreference)) {
throw new TypeError('Invalid read preference: ' + r);
}
return options;
};
// Set simple property
var getSingleProperty = function(obj, name, value) {
Object.defineProperty(obj, name, {
enumerable: true,
get: function() {
return value;
}
});
};
var formatSortValue = (exports.formatSortValue = function(sortDirection) {
var value = ('' + sortDirection).toLowerCase();
switch (value) {
case 'ascending':
case 'asc':
case '1':
return 1;
case 'descending':
case 'desc':
case '-1':
return -1;
default:
throw new Error(
'Illegal sort clause, must be of the form ' +
"[['field1', '(ascending|descending)'], " +
"['field2', '(ascending|descending)']]"
);
}
});
var formattedOrderClause = (exports.formattedOrderClause = function(sortValue) {
var orderBy = {};
if (sortValue == null) return null;
if (Array.isArray(sortValue)) {
if (sortValue.length === 0) {
return null;
}
for (var i = 0; i < sortValue.length; i++) {
if (sortValue[i].constructor === String) {
orderBy[sortValue[i]] = 1;
} else {
orderBy[sortValue[i][0]] = formatSortValue(sortValue[i][1]);
}
}
} else if (sortValue != null && typeof sortValue === 'object') {
orderBy = sortValue;
} else if (typeof sortValue === 'string') {
orderBy[sortValue] = 1;
} else {
throw new Error(
'Illegal sort clause, must be of the form ' +
"[['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]"
);
}
return orderBy;
});
var checkCollectionName = function checkCollectionName(collectionName) {
if ('string' !== typeof collectionName) {
throw new MongoError('collection name must be a String');
}
if (!collectionName || collectionName.indexOf('..') !== -1) {
throw new MongoError('collection names cannot be empty');
}
if (
collectionName.indexOf('$') !== -1 &&
collectionName.match(/((^\$cmd)|(oplog\.\$main))/) == null
) {
throw new MongoError("collection names must not contain '$'");
}
if (collectionName.match(/^\.|\.$/) != null) {
throw new MongoError("collection names must not start or end with '.'");
}
// Validate that we are not passing 0x00 in the colletion name
if (collectionName.indexOf('\x00') !== -1) {
throw new MongoError('collection names cannot contain a null character');
}
};
var handleCallback = function(callback, err, value1, value2) {
try {
if (callback == null) return;
if (callback) {
return value2 ? callback(err, value1, value2) : callback(err, value1);
}
} catch (err) {
process.nextTick(function() {
throw err;
});
return false;
}
return true;
};
/**
* Wrap a Mongo error document in an Error instance
* @ignore
* @api private
*/
var toError = function(error) {
if (error instanceof Error) return error;
var msg = error.err || error.errmsg || error.errMessage || error;
var e = MongoError.create({ message: msg, driver: true });
// Get all object keys
var keys = typeof error === 'object' ? Object.keys(error) : [];
for (var i = 0; i < keys.length; i++) {
try {
e[keys[i]] = error[keys[i]];
} catch (err) {
// continue
}
}
return e;
};
/**
* @ignore
*/
var normalizeHintField = function normalizeHintField(hint) {
var finalHint = null;
if (typeof hint === 'string') {
finalHint = hint;
} else if (Array.isArray(hint)) {
finalHint = {};
hint.forEach(function(param) {
finalHint[param] = 1;
});
} else if (hint != null && typeof hint === 'object') {
finalHint = {};
for (var name in hint) {
finalHint[name] = hint[name];
}
}
return finalHint;
};
/**
* Create index name based on field spec
*
* @ignore
* @api private
*/
var parseIndexOptions = function(fieldOrSpec) {
var fieldHash = {};
var indexes = [];
var keys;
// Get all the fields accordingly
if ('string' === typeof fieldOrSpec) {
// 'type'
indexes.push(fieldOrSpec + '_' + 1);
fieldHash[fieldOrSpec] = 1;
} else if (Array.isArray(fieldOrSpec)) {
fieldOrSpec.forEach(function(f) {
if ('string' === typeof f) {
// [{location:'2d'}, 'type']
indexes.push(f + '_' + 1);
fieldHash[f] = 1;
} else if (Array.isArray(f)) {
// [['location', '2d'],['type', 1]]
indexes.push(f[0] + '_' + (f[1] || 1));
fieldHash[f[0]] = f[1] || 1;
} else if (isObject(f)) {
// [{location:'2d'}, {type:1}]
keys = Object.keys(f);
keys.forEach(function(k) {
indexes.push(k + '_' + f[k]);
fieldHash[k] = f[k];
});
} else {
// undefined (ignore)
}
});
} else if (isObject(fieldOrSpec)) {
// {location:'2d', type:1}
keys = Object.keys(fieldOrSpec);
keys.forEach(function(key) {
indexes.push(key + '_' + fieldOrSpec[key]);
fieldHash[key] = fieldOrSpec[key];
});
}
return {
name: indexes.join('_'),
keys: keys,
fieldHash: fieldHash
};
};
var isObject = (exports.isObject = function(arg) {
return '[object Object]' === Object.prototype.toString.call(arg);
});
var debugOptions = function(debugFields, options) {
var finaloptions = {};
debugFields.forEach(function(n) {
finaloptions[n] = options[n];
});
return finaloptions;
};
var decorateCommand = function(command, options, exclude) {
for (var name in options) {
if (exclude[name] == null) command[name] = options[name];
}
return command;
};
var mergeOptions = function(target, source) {
for (var name in source) {
target[name] = source[name];
}
return target;
};
// Merge options with translation
var translateOptions = function(target, source) {
var translations = {
// SSL translation options
sslCA: 'ca',
sslCRL: 'crl',
sslValidate: 'rejectUnauthorized',
sslKey: 'key',
sslCert: 'cert',
sslPass: 'passphrase',
// SocketTimeout translation options
socketTimeoutMS: 'socketTimeout',
connectTimeoutMS: 'connectionTimeout',
// Replicaset options
replicaSet: 'setName',
rs_name: 'setName',
secondaryAcceptableLatencyMS: 'acceptableLatency',
connectWithNoPrimary: 'secondaryOnlyConnectionAllowed',
// Mongos options
acceptableLatencyMS: 'localThresholdMS'
};
for (var name in source) {
if (translations[name]) {
target[translations[name]] = source[name];
} else {
target[name] = source[name];
}
}
return target;
};
var filterOptions = function(options, names) {
var filterOptions = {};
for (var name in options) {
if (names.indexOf(name) !== -1) filterOptions[name] = options[name];
}
// Filtered options
return filterOptions;
};
// Write concern keys
var writeConcernKeys = ['w', 'j', 'wtimeout', 'fsync'];
// Merge the write concern options
var mergeOptionsAndWriteConcern = function(targetOptions, sourceOptions, keys, mergeWriteConcern) {
// Mix in any allowed options
for (var i = 0; i < keys.length; i++) {
if (!targetOptions[keys[i]] && sourceOptions[keys[i]] !== undefined) {
targetOptions[keys[i]] = sourceOptions[keys[i]];
}
}
// No merging of write concern
if (!mergeWriteConcern) return targetOptions;
// Found no write Concern options
var found = false;
for (i = 0; i < writeConcernKeys.length; i++) {
if (targetOptions[writeConcernKeys[i]]) {
found = true;
break;
}
}
if (!found) {
for (i = 0; i < writeConcernKeys.length; i++) {
if (sourceOptions[writeConcernKeys[i]]) {
targetOptions[writeConcernKeys[i]] = sourceOptions[writeConcernKeys[i]];
}
}
}
return targetOptions;
};
/**
* Executes the given operation with provided arguments.
*
* This method reduces large amounts of duplication in the entire codebase by providing
* a single point for determining whether callbacks or promises should be used. Additionally
* it allows for a single point of entry to provide features such as implicit sessions, which
* are required by the Driver Sessions specification in the event that a ClientSession is
* not provided
*
* @param {object} topology The topology to execute this operation on
* @param {function} operation The operation to execute
* @param {array} args Arguments to apply the provided operation
* @param {object} [options] Options that modify the behavior of the method
 * @param {function} [options.resultMutator] Allows for the result of the operation to be changed for custom return types
*/
const executeOperation = (topology, operation, args, options) => {
if (topology == null) {
throw new TypeError('This method requires a valid topology instance');
}
if (!Array.isArray(args)) {
throw new TypeError('This method requires an array of arguments to apply');
}
options = options || {};
const Promise = topology.s.promiseLibrary;
let resultMutator = options.resultMutator;
let callback = args[args.length - 1];
// The driver sessions spec mandates that we implicitly create sessions for operations
// that are not explicitly provided with a session.
let session, opOptions, owner;
if (!options.skipSessions && topology.hasSessionSupport()) {
opOptions = args[args.length - 2];
if (opOptions == null || opOptions.session == null) {
owner = Symbol();
session = topology.startSession({ owner });
const optionsIndex = args.length - 2;
args[optionsIndex] = Object.assign({}, args[optionsIndex], { session: session });
} else if (opOptions.session && opOptions.session.hasEnded) {
throw new MongoError('Use of expired sessions is not permitted');
}
}
const makeExecuteCallback = (resolve, reject) =>
function executeCallback(err, result) {
if (session && session.owner === owner && !options.returnsCursor) {
session.endSession(() => {
delete opOptions.session;
if (err) return reject(err);
if (resultMutator) return resolve(resultMutator(result));
resolve(result);
});
} else {
if (err) return reject(err);
if (resultMutator) return resolve(resultMutator(result));
resolve(result);
}
};
// Execute using callback
if (typeof callback === 'function') {
callback = args.pop();
const handler = makeExecuteCallback(
result => callback(null, result),
err => callback(err, null)
);
args.push(handler);
try {
return operation.apply(null, args);
} catch (e) {
handler(e);
throw e;
}
}
// Return a Promise
if (args[args.length - 1] != null) {
throw new TypeError('final argument to `executeOperation` must be a callback');
}
return new Promise(function(resolve, reject) {
const handler = makeExecuteCallback(resolve, reject);
args[args.length - 1] = handler;
try {
return operation.apply(null, args);
} catch (e) {
handler(e);
}
});
};
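
// A minimal sketch of a typical call site for executeOperation; `Collection` and
// `countOperation` are placeholders chosen for illustration, not definitions from
// this file. With a callback supplied the operation runs directly; without one, a
// Promise from the topology's configured promise library is returned.
//
//   Collection.prototype.count = function(query, options, callback) {
//     if (typeof options === 'function') (callback = options), (options = {});
//     options = options || {};
//     return executeOperation(this.s.topology, countOperation, [this, query, options, callback]);
//   };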
/**
* Applies a write concern to a command based on well defined inheritance rules, optionally
* detecting support for the write concern in the first place.
*
* @param {Object} target the target command we will be applying the write concern to
* @param {Object} sources sources where we can inherit default write concerns from
* @param {Object} [options] optional settings passed into a command for write concern overrides
* @returns {Object} the (now) decorated target
*/
function applyWriteConcern(target, sources, options) {
options = options || {};
const db = sources.db;
const coll = sources.collection;
if (options.session && options.session.inTransaction()) {
// writeConcern is not allowed within a multi-statement transaction
if (target.writeConcern) {
delete target.writeConcern;
}
return target;
}
// NOTE: there is probably a much better place for this
if (db && db.s.options.retryWrites) {
target.retryWrites = true;
}
if (options.w != null || options.j != null || options.fsync != null) {
const writeConcern = {};
if (options.w != null) writeConcern.w = options.w;
if (options.wtimeout != null) writeConcern.wtimeout = options.wtimeout;
if (options.j != null) writeConcern.j = options.j;
if (options.fsync != null) writeConcern.fsync = options.fsync;
return Object.assign(target, { writeConcern });
}
if (
coll &&
(coll.writeConcern.w != null || coll.writeConcern.j != null || coll.writeConcern.fsync != null)
) {
return Object.assign(target, { writeConcern: Object.assign({}, coll.writeConcern) });
}
if (
db &&
(db.writeConcern.w != null || db.writeConcern.j != null || db.writeConcern.fsync != null)
) {
return Object.assign(target, { writeConcern: Object.assign({}, db.writeConcern) });
}
return target;
}
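
// A minimal sketch of the inheritance order applied above: explicit options win,
// then the collection's write concern, then the database's. The variables here
// (doc, db, collection) are assumed to be in scope for illustration.
//
//   const cmd = applyWriteConcern(
//     { insert: 'users', documents: [doc] },   // target command
//     { db, collection },                      // sources to inherit from
//     { w: 'majority', wtimeout: 5000 }        // explicit options take precedence
//   );
//   // cmd.writeConcern -> { w: 'majority', wtimeout: 5000 }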
exports.filterOptions = filterOptions;
exports.mergeOptions = mergeOptions;
exports.translateOptions = translateOptions;
exports.shallowClone = shallowClone;
exports.getSingleProperty = getSingleProperty;
exports.checkCollectionName = checkCollectionName;
exports.toError = toError;
exports.formattedOrderClause = formattedOrderClause;
exports.parseIndexOptions = parseIndexOptions;
exports.normalizeHintField = normalizeHintField;
exports.handleCallback = handleCallback;
exports.decorateCommand = decorateCommand;
exports.isObject = isObject;
exports.debugOptions = debugOptions;
exports.MAX_JS_INT = 0x20000000000000;
exports.mergeOptionsAndWriteConcern = mergeOptionsAndWriteConcern;
exports.translateReadPreference = translateReadPreference;
exports.executeOperation = executeOperation;
exports.applyWriteConcern = applyWriteConcern;
| 1 | 14,248 | we don't use this anymore, so we can delete it. | mongodb-node-mongodb-native | js |
@@ -283,6 +283,8 @@ func initTelemetry(genesis bookkeeping.Genesis, log logging.Logger, dataDirector
fmt.Fprintln(os.Stdout, "error loading telemetry config", err)
return
}
+ fmt.Fprintln(os.Stdout, "algoh telemetry configured from file:", telemetryConfig.FilePath)
+
// Apply telemetry override.
telemetryConfig.Enable = logging.TelemetryOverride(*telemetryOverride) | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path/filepath"
"sync"
"syscall"
"time"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/daemon/algod/api/client"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/nodecontrol"
"github.com/algorand/go-algorand/shared/algoh"
"github.com/algorand/go-algorand/tools/network"
"github.com/algorand/go-algorand/util"
)
var dataDirectory = flag.String("d", "", "Root Algorand daemon data path")
var versionCheck = flag.Bool("v", false, "Display and write current build version and exit")
var telemetryOverride = flag.String("t", "", `Override telemetry setting if supported (Use "true", "false", "0" or "1")`)
const algodFileName = "algod"
const goalFileName = "goal"
var exeDir string
func init() {
}
type stdCollector struct {
output string
}
func (c *stdCollector) Write(p []byte) (n int, err error) {
s := string(p)
c.output += s
return len(p), nil
}
func main() {
blockWatcherInitialized := false
flag.Parse()
nc := getNodeController()
genesis, err := nc.GetGenesis()
if err != nil {
fmt.Fprintln(os.Stdout, "error loading telemetry config", err)
return
}
dataDir := ensureDataDir()
absolutePath, absPathErr := filepath.Abs(dataDir)
config.UpdateVersionDataDir(absolutePath)
if *versionCheck {
fmt.Println(config.FormatVersionAndLicense())
return
}
// If data directory doesn't exist, we can't run. Don't bother trying.
if len(dataDir) == 0 {
fmt.Fprintln(os.Stderr, "Data directory not specified. Please use -d or set $ALGORAND_DATA in your environment.")
os.Exit(1)
}
if absPathErr != nil {
reportErrorf("Can't convert data directory's path to absolute, %v\n", dataDir)
}
if _, err := os.Stat(absolutePath); err != nil {
reportErrorf("Data directory %s does not appear to be valid\n", dataDir)
}
algohConfig, err := algoh.LoadConfigFromFile(filepath.Join(dataDir, algoh.ConfigFilename))
if err != nil && !os.IsNotExist(err) {
reportErrorf("Error loading configuration, %v\n", err)
}
validateConfig(algohConfig)
done := make(chan struct{})
log := logging.Base()
configureLogging(genesis, log, absolutePath, done)
defer log.CloseTelemetry()
exeDir, err = util.ExeDir()
if err != nil {
reportErrorf("Error getting ExeDir: %v\n", err)
}
var errorOutput stdCollector
var output stdCollector
go func() {
args := make([]string, len(os.Args)-1)
copy(args, os.Args[1:]) // Copy our arguments (skip the executable)
if log.GetTelemetryEnabled() {
args = append(args, "-s", log.GetTelemetrySession())
}
algodPath := filepath.Join(exeDir, algodFileName)
cmd := exec.Command(algodPath, args...)
cmd.Stderr = &errorOutput
cmd.Stdout = &output
err = cmd.Start()
if err != nil {
reportErrorf("error starting algod: %v", err)
}
err = cmd.Wait()
if err != nil {
reportErrorf("error waiting for algod: %v", err)
}
close(done)
// capture logs if algod terminated prior to blockWatcher starting
if !blockWatcherInitialized {
captureErrorLogs(algohConfig, errorOutput, output, absolutePath, true)
}
log.Infoln("++++++++++++++++++++++++++++++++++++++++")
log.Infoln("algod exited. Exiting...")
log.Infoln("++++++++++++++++++++++++++++++++++++++++")
}()
// Set up error capturing
defer func() {
captureErrorLogs(algohConfig, errorOutput, output, absolutePath, false)
}()
// Handle signals cleanly
c := make(chan os.Signal)
signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)
signal.Ignore(syscall.SIGHUP)
go func() {
sig := <-c
fmt.Printf("Exiting algoh on %v\n", sig)
os.Exit(0)
}()
algodClient, err := waitForClient(nc, done)
if err != nil {
reportErrorf("error creating Rest Client: %v\n", err)
}
var wg sync.WaitGroup
deadMan := makeDeadManWatcher(algohConfig.DeadManTimeSec, algodClient, algohConfig.UploadOnError, done, &wg)
wg.Add(1)
listeners := []blockListener{deadMan}
if algohConfig.SendBlockStats {
// Note: Resume can be implemented here. Store blockListener state and set curBlock based on latestBlock/lastBlock.
listeners = append(listeners, &blockstats{log: logging.Base()})
}
delayBetweenStatusChecks := time.Duration(algohConfig.StatusDelayMS) * time.Millisecond
stallDetectionDelay := time.Duration(algohConfig.StallDelayMS) * time.Millisecond
runBlockWatcher(listeners, algodClient, done, &wg, delayBetweenStatusChecks, stallDetectionDelay)
wg.Add(1)
blockWatcherInitialized = true
wg.Wait()
fmt.Println("Exiting algoh normally...")
}
func waitForClient(nc nodecontrol.NodeController, abort chan struct{}) (client client.RestClient, err error) {
for {
client, err = getRestClient(nc)
if err == nil {
return client, nil
}
select {
case <-abort:
err = fmt.Errorf("aborted waiting for client")
return
case <-time.After(100 * time.Millisecond):
}
}
}
func getRestClient(nc nodecontrol.NodeController) (rc client.RestClient, err error) {
// Fetch the algod client
algodClient, err := nc.AlgodClient()
if err != nil {
return
}
// Make sure the node is running
_, err = algodClient.Status()
if err != nil {
return
}
return algodClient, nil
}
func resolveDataDir() string {
// Figure out what data directory to tell algod to use.
// If not specified on cmdline with '-d', look for default in environment.
var dir string
if dataDirectory == nil || *dataDirectory == "" {
dir = os.Getenv("ALGORAND_DATA")
} else {
dir = *dataDirectory
}
return dir
}
func ensureDataDir() string {
// Get the target data directory to work against,
// then handle the scenario where no data directory is provided.
dir := resolveDataDir()
if dir == "" {
reportErrorf("Data directory not specified. Please use -d or set $ALGORAND_DATA in your environment. Exiting.\n")
}
return dir
}
func getNodeController() nodecontrol.NodeController {
binDir, err := util.ExeDir()
if err != nil {
panic(err)
}
nc := nodecontrol.MakeNodeController(binDir, ensureDataDir())
return nc
}
func configureLogging(genesis bookkeeping.Genesis, log logging.Logger, rootPath string, abort chan struct{}) {
log = logging.Base()
liveLog := fmt.Sprintf("%s/host.log", rootPath)
fmt.Println("Logging to: ", liveLog)
writer, err := os.OpenFile(liveLog, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
panic(fmt.Sprintf("configureLogging: cannot open log file %v", err))
}
log.SetOutput(writer)
log.SetJSONFormatter()
log.SetLevel(logging.Debug)
initTelemetry(genesis, log, rootPath, abort)
	// if we have telemetry enabled, we want to use its session ID as part of the
// collected metrics decorations.
fmt.Fprintln(writer, "++++++++++++++++++++++++++++++++++++++++")
fmt.Fprintln(writer, "Logging Starting")
fmt.Fprintln(writer, "++++++++++++++++++++++++++++++++++++++++")
}
func initTelemetry(genesis bookkeeping.Genesis, log logging.Logger, dataDirectory string, abort chan struct{}) {
// Enable telemetry hook in daemon to send logs to cloud
// If ALGOTEST env variable is set, telemetry is disabled - allows disabling telemetry for tests
isTest := os.Getenv("ALGOTEST") != ""
if !isTest {
telemetryConfig, err := logging.EnsureTelemetryConfig(&dataDirectory, genesis.ID())
if err != nil {
fmt.Fprintln(os.Stdout, "error loading telemetry config", err)
return
}
// Apply telemetry override.
telemetryConfig.Enable = logging.TelemetryOverride(*telemetryOverride)
if telemetryConfig.Enable {
err = log.EnableTelemetry(telemetryConfig)
if err != nil {
fmt.Fprintln(os.Stdout, "error creating telemetry hook", err)
return
}
if log.GetTelemetryEnabled() {
cfg, err := config.LoadConfigFromDisk(dataDirectory)
if err != nil && !os.IsNotExist(err) {
log.Fatalf("Cannot load config: %v", err)
}
// If the telemetry URI is not set, periodically check SRV records for new telemetry URI
if log.GetTelemetryURI() == "" {
network.StartTelemetryURIUpdateService(time.Minute, cfg, genesis.Network, log, abort)
}
// For privacy concerns, we don't want to provide the full data directory to telemetry.
// But to be useful where multiple nodes are installed for convenience, we should be
// able to discriminate between instances with the last letter of the path.
if dataDirectory != "" {
dataDirectory = dataDirectory[len(dataDirectory)-1:]
}
currentVersion := config.GetCurrentVersion()
startupDetails := telemetryspec.StartupEventDetails{
Version: currentVersion.String(),
CommitHash: currentVersion.CommitHash,
Branch: currentVersion.Branch,
Channel: currentVersion.Channel,
InstanceHash: crypto.Hash([]byte(dataDirectory)).String(),
}
log.EventWithDetails(telemetryspec.HostApplicationState, telemetryspec.StartupEvent, startupDetails)
}
}
}
}
// capture algod error output and optionally upload logs
func captureErrorLogs(algohConfig algoh.HostConfig, errorOutput stdCollector, output stdCollector, absolutePath string, errorCondition bool) {
if errorOutput.output != "" {
fmt.Fprintf(os.Stdout, "errorOutput.output: `%s`\n", errorOutput.output)
errorCondition = true
		// print the captured output verbatim rather than treating it as a format string
		fmt.Fprint(os.Stderr, errorOutput.output)
details := telemetryspec.ErrorOutputEventDetails{
Error: errorOutput.output,
Output: output.output,
}
log.EventWithDetails(telemetryspec.HostApplicationState, telemetryspec.ErrorOutputEvent, details)
// Write stdout & stderr streams to disk
_ = ioutil.WriteFile(filepath.Join(absolutePath, nodecontrol.StdOutFilename), []byte(output.output), os.ModePerm)
_ = ioutil.WriteFile(filepath.Join(absolutePath, nodecontrol.StdErrFilename), []byte(errorOutput.output), os.ModePerm)
}
if errorCondition && algohConfig.UploadOnError {
fmt.Fprintf(os.Stdout, "Uploading logs...\n")
sendLogs()
}
}
func reportErrorf(format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, format, args...)
logging.Base().Fatalf(format, args...)
}
func sendLogs() {
var args []string
args = append(args, "-d", ensureDataDir())
args = append(args, "logging", "send")
goalPath := filepath.Join(exeDir, goalFileName)
cmd := exec.Command(goalPath, args...)
err := cmd.Run()
if err != nil {
reportErrorf("Error sending logs: %v\n", err)
}
}
func validateConfig(config algoh.HostConfig) {
// Enforce a reasonable deadman timeout
if config.DeadManTimeSec > 0 && config.DeadManTimeSec < 30 {
reportErrorf("Config.DeadManTimeSec should be >= 30 seconds (set to %v)\n", config.DeadManTimeSec)
}
}
| 1 | 39,012 | nit: change to "Telemetry configuration loaded from '%s'" | algorand-go-algorand | go |
@@ -182,7 +182,8 @@ func TestSerialiseBlockWitness(t *testing.T) {
if err := bwb.WriteTo(&b); err != nil {
t.Errorf("Could not make block witness: %v", err)
}
- expected := common.FromHex("0xa76862616c616e6365730065636f64657300666861736865731822646b65797300666e6f6e63657300697374727563747572650b6676616c75657300582023181a62d35fe01562158be610f84e047f99f5e74d896da21682d925964ece3a0601024704010402040304")
+
+ expected := common.FromHex("0xa76862616c616e6365730065636f64657300666861736865731822646b65797300666e6f6e63657300697374727563747572650b6676616c756573005820858f70a4b1e6aa71a7edc574d2ca946495a038aa37ce13dc7b7ed15661a6ff2f0601024704010402040304")
if !bytes.Equal(expected, b.Bytes()) {
t.Errorf("Expected %x, got: %x", expected, b.Bytes())
} | 1 | package trie
import (
"bytes"
"testing"
"github.com/ledgerwatch/turbo-geth/common"
)
func TestSupplyKeyValue(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.supplyKey([]byte("key")); err != nil {
t.Errorf("Could not supply key: %v", err)
}
if !bytes.Equal(common.FromHex("0x436b6579"), bwb.Keys.buffer.Bytes()) {
t.Errorf("Expected 0x436b6579 in keys tape, got: %x", bwb.Keys.buffer.Bytes())
}
if err := bwb.supplyValue([]byte("value")); err != nil {
t.Errorf("Could not supply value: %v", err)
}
if !bytes.Equal(common.FromHex("0x4576616c7565"), bwb.Values.buffer.Bytes()) {
t.Errorf("Expected 0x4576616c7565 in values tape, got: %x", bwb.Values.buffer.Bytes())
}
}
func TestSupplyHash(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.supplyHash(common.HexToHash("0x9583498348fc48393abc")); err != nil {
t.Errorf("Could not supply hash: %v", err)
}
if !bytes.Equal(common.FromHex("0x5820000000000000000000000000000000000000000000009583498348fc48393abc"), bwb.Hashes.buffer.Bytes()) {
t.Errorf("Expected 0x5820000000000000000000000000000000000000000000009583498348fc48393abc in hash tape, got: %x", bwb.Hashes.buffer.Bytes())
}
}
func TestSupplyCode(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.supplyCode(common.FromHex("0x9583498348fc48393abc58bc")); err != nil {
t.Errorf("Could not supply code: %v", err)
}
if !bytes.Equal(common.FromHex("0x4c9583498348fc48393abc58bc"), bwb.Codes.buffer.Bytes()) {
t.Errorf("Expected 0x4c9583498348fc48393abc58bc in codes tape, got: %x", bwb.Codes.buffer.Bytes())
}
}
func TestOpLeaf(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.leaf(56); err != nil {
t.Errorf("Could not call leaf: %v", err)
}
if !bytes.Equal(common.FromHex("0x001838"), bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected 0x001838 in structure tape, got: %x", bwb.Structure.buffer.Bytes())
}
}
func TestOpLeafHash(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.leafHash(56); err != nil {
t.Errorf("Could not call leafHash: %v", err)
}
if !bytes.Equal(common.FromHex("0x011838"), bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected 0x011838 in structure tape, got: %x", bwb.Structure.buffer.Bytes())
}
}
func TestOpExtension(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.extension(common.FromHex("0x0f05")); err != nil {
t.Errorf("Could not call extension: %v", err)
}
if !bytes.Equal(common.FromHex("0x02420f05"), bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected 0x02420f05 in structure tape, got: %x", bwb.Structure.buffer.Bytes())
}
}
func TestOpExtensionHash(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.extensionHash(common.FromHex("0x0f05")); err != nil {
t.Errorf("Could not call extensionHash: %v", err)
}
if !bytes.Equal(common.FromHex("0x03420f05"), bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected 0x03420f05 in structure tape, got: %x", bwb.Structure.buffer.Bytes())
}
}
func TestOpBranch(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.branch(1 + 4); err != nil {
t.Errorf("Could not call branch: %v", err)
}
if !bytes.Equal(common.FromHex("0x0405"), bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected 0x0405 in structure tape, got: %x", bwb.Structure.buffer.Bytes())
}
}
func TestOpBranchHash(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.branchHash(1 + 4); err != nil {
t.Errorf("Could not call branchHash: %v", err)
}
if !bytes.Equal(common.FromHex("0x0505"), bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected 0x0505 in structure tape, got: %x", bwb.Structure.buffer.Bytes())
}
}
func TestOpHash(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.hash(3); err != nil {
t.Errorf("Could not call hash: %v", err)
}
if !bytes.Equal(common.FromHex("0x0603"), bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected 0x0603 in structure tape, got: %x", bwb.Structure.buffer.Bytes())
}
}
func TestOpCode(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.code(); err != nil {
t.Errorf("Could not call code: %v", err)
}
if !bytes.Equal(common.FromHex("0x07"), bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected 0x07 in structure tape, got: %x", bwb.Structure.buffer.Bytes())
}
}
func TestOpAccountLeaf(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.accountLeaf(56, 3); err != nil {
t.Errorf("Could not call acccountLeaf: %v", err)
}
expected := common.FromHex("0x08183803")
if !bytes.Equal(expected, bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected %x in structure tape, got: %x", expected, bwb.Structure.buffer.Bytes())
}
}
func TestOpAccountLeafHash(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.accountLeafHash(56, 3); err != nil {
t.Errorf("Could not call accountLeafHash: %v", err)
}
expected := common.FromHex("0x09183803")
if !bytes.Equal(expected, bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected %x in structure tape, got: %x", expected, bwb.Structure.buffer.Bytes())
}
}
func TestOpEmptyRoot(t *testing.T) {
bwb := NewBlockWitnessBuilder(false)
if err := bwb.emptyRoot(); err != nil {
t.Errorf("Could not call emptyRoot: %v", err)
}
expected := common.FromHex("0x0a")
if !bytes.Equal(expected, bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected %x in structure tape, got: %x", expected, bwb.Structure.buffer.Bytes())
}
}
func TestMakeBlockWitness(t *testing.T) {
tr := New(common.Hash{})
tr.Update([]byte("ABCD0001"), []byte("val1"), 0)
tr.Update([]byte("ABCE0002"), []byte("val2"), 0)
bwb := NewBlockWitnessBuilder(false)
rs := NewResolveSet(2)
if err := bwb.MakeBlockWitness(tr, rs, nil, nil); err != nil {
t.Errorf("Could not make block witness: %v", err)
}
expected := common.FromHex("0x0601024704010402040304")
if !bytes.Equal(expected, bwb.Structure.buffer.Bytes()) {
t.Errorf("Expected %x in structure tape, got: %x", expected, bwb.Structure.buffer.Bytes())
}
}
func TestSerialiseBlockWitness(t *testing.T) {
tr := New(common.Hash{})
tr.Update([]byte("ABCD0001"), []byte("val1"), 0)
tr.Update([]byte("ABCE0002"), []byte("val2"), 0)
bwb := NewBlockWitnessBuilder(false)
rs := NewResolveSet(2)
if err := bwb.MakeBlockWitness(tr, rs, nil, nil); err != nil {
t.Errorf("Could not make block witness: %v", err)
}
var b bytes.Buffer
if err := bwb.WriteTo(&b); err != nil {
t.Errorf("Could not make block witness: %v", err)
}
expected := common.FromHex("0xa76862616c616e6365730065636f64657300666861736865731822646b65797300666e6f6e63657300697374727563747572650b6676616c75657300582023181a62d35fe01562158be610f84e047f99f5e74d896da21682d925964ece3a0601024704010402040304")
if !bytes.Equal(expected, b.Bytes()) {
t.Errorf("Expected %x, got: %x", expected, b.Bytes())
}
tr1, _, err := BlockWitnessToTrie(b.Bytes(), false)
if err != nil {
t.Errorf("Could not restore trie from the block witness: %v", err)
}
if tr.Hash() != tr1.Hash() {
t.Errorf("Reconstructed block witness has different root hash than source trie")
}
}
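
A note on reading the expected byte strings asserted in these tests: each tape entry appears to be a one-byte opcode followed by operands in CBOR's minimal encoding (0x18 0x38 is CBOR for the unsigned integer 56, 0x42 is a two-byte byte-string header, 0x58 0x20 a 32-byte one, and the 0xa7 prefix in TestSerialiseBlockWitness opens a seven-entry map). The sketch below is not erigon code; it only reproduces the TestOpLeaf expectation from those rules.

package main

import (
	"bytes"
	"fmt"
)

// encodeUint emits an unsigned integer using CBOR's minimal major-type-0
// rules for small values: below 24 it is a single byte, up to 255 it is
// the prefix 0x18 followed by the value itself.
func encodeUint(buf *bytes.Buffer, v uint8) {
	if v < 24 {
		buf.WriteByte(v)
		return
	}
	buf.WriteByte(0x18)
	buf.WriteByte(v)
}

func main() {
	// Reproduce the TestOpLeaf expectation: leaf opcode 0x00, then key length 56.
	var buf bytes.Buffer
	buf.WriteByte(0x00)
	encodeUint(&buf, 56)
	fmt.Printf("%x\n", buf.Bytes()) // prints 001838
}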
| 1 | 21,237 | Why did this value change? | ledgerwatch-erigon | go |
@@ -23,6 +23,7 @@ import (
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/kmeta"
+ gcpduckv1alpha1 "github.com/google/knative-gcp/pkg/apis/duck/v1alpha1"
"github.com/google/knative-gcp/pkg/apis/pubsub/v1alpha1"
)
| 1 | /*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
duckv1alpha1 "knative.dev/eventing/pkg/apis/duck/v1alpha1"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/kmeta"
"github.com/google/knative-gcp/pkg/apis/pubsub/v1alpha1"
)
// PullSubscriptionArgs are the arguments needed to create a Channel Subscriber.
// Every field is required.
type PullSubscriptionArgs struct {
Owner kmeta.OwnerRefable
Name string
Project string
Topic string
ServiceAccount string
Secret *corev1.SecretKeySelector
Labels map[string]string
Annotations map[string]string
Subscriber duckv1alpha1.SubscriberSpec
}
// MakePullSubscription generates (but does not insert into K8s) the
// PullSubscription for Channels.
func MakePullSubscription(args *PullSubscriptionArgs) *v1alpha1.PullSubscription {
spec := v1alpha1.PullSubscriptionSpec{
ServiceAccount: args.ServiceAccount,
Secret: args.Secret,
Project: args.Project,
Topic: args.Topic,
}
reply := args.Subscriber.ReplyURI
subscriber := args.Subscriber.SubscriberURI
// If subscriber and reply is used, map:
// pull.transformer to sub.subscriber
// pull.sink to sub.reply
// Otherwise, pull.sink has to be used, but subscriptions allow for just
	// reply or just subscriber. So set the single non-nil uri to pull.sink.
if subscriber != nil && reply != nil {
spec.Transformer = &duckv1.Destination{
URI: subscriber,
}
spec.Sink = duckv1.Destination{
URI: reply,
}
} else if subscriber != nil {
spec.Sink = duckv1.Destination{
URI: subscriber,
}
} else if reply != nil {
spec.Sink = duckv1.Destination{
URI: reply,
}
}
return &v1alpha1.PullSubscription{
ObjectMeta: metav1.ObjectMeta{
Namespace: args.Owner.GetObjectMeta().GetNamespace(),
Name: args.Name,
Labels: args.Labels,
Annotations: args.Annotations,
OwnerReferences: []metav1.OwnerReference{*kmeta.NewControllerRef(args.Owner)},
},
Spec: spec,
}
}
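
The if/else chain above is the part that is easy to misread, so here is a stripped-down model of just that decision, using plain strings instead of duckv1.Destination (illustrative only, not the real types): with both endpoints set, the subscriber feeds pull.transformer and the reply feeds pull.sink; otherwise whichever endpoint is present becomes pull.sink.

package main

import "fmt"

// mapEndpoints mirrors the decision made in MakePullSubscription above.
func mapEndpoints(subscriber, reply string) (sink, transformer string) {
	switch {
	case subscriber != "" && reply != "":
		return reply, subscriber
	case subscriber != "":
		return subscriber, ""
	default:
		return reply, ""
	}
}

func main() {
	for _, c := range [][2]string{
		{"http://sub", "http://reply"},
		{"http://sub", ""},
		{"", "http://reply"},
	} {
		sink, transformer := mapEndpoints(c[0], c[1])
		fmt.Printf("subscriber=%q reply=%q -> sink=%q transformer=%q\n", c[0], c[1], sink, transformer)
	}
}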
| 1 | 11,677 | maybe `duckpubsubv1alpha1` ? | google-knative-gcp | go |
@@ -327,6 +327,16 @@ class FirefoxProfile(object):
rc.append(node.data)
return ''.join(rc).strip()
+ def parse_manifest_json(content):
+ """Extracts the details from the contents of a WebExtensions `manifest.json` file."""
+ manifest = json.loads(content)
+ return {
+ 'id': manifest['applications']['gecko']['id'],
+ 'version': manifest['version'],
+            'name': manifest['name'],
+ 'unpack': False,
+ }
+
if not os.path.exists(addon_path):
raise IOError('Add-on path does not exist: %s' % addon_path)
| 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import with_statement
import base64
import copy
import json
import os
import re
import shutil
import sys
import tempfile
import zipfile
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from xml.dom import minidom
from selenium.webdriver.common.proxy import ProxyType
from selenium.common.exceptions import WebDriverException
WEBDRIVER_EXT = "webdriver.xpi"
WEBDRIVER_PREFERENCES = "webdriver_prefs.json"
EXTENSION_NAME = "[email protected]"
class AddonFormatError(Exception):
"""Exception for not well-formed add-on manifest files"""
class FirefoxProfile(object):
ANONYMOUS_PROFILE_NAME = "WEBDRIVER_ANONYMOUS_PROFILE"
DEFAULT_PREFERENCES = None
def __init__(self, profile_directory=None):
"""
Initialises a new instance of a Firefox Profile
:args:
- profile_directory: Directory of profile that you want to use.
This defaults to None and will create a new
directory when object is created.
"""
if not FirefoxProfile.DEFAULT_PREFERENCES:
with open(os.path.join(os.path.dirname(__file__),
WEBDRIVER_PREFERENCES)) as default_prefs:
FirefoxProfile.DEFAULT_PREFERENCES = json.load(default_prefs)
self.default_preferences = copy.deepcopy(
FirefoxProfile.DEFAULT_PREFERENCES['mutable'])
self.native_events_enabled = True
self.profile_dir = profile_directory
self.tempfolder = None
if self.profile_dir is None:
self.profile_dir = self._create_tempfolder()
else:
self.tempfolder = tempfile.mkdtemp()
newprof = os.path.join(self.tempfolder, "webdriver-py-profilecopy")
shutil.copytree(self.profile_dir, newprof,
ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
self.profile_dir = newprof
self._read_existing_userjs(os.path.join(self.profile_dir, "user.js"))
self.extensionsDir = os.path.join(self.profile_dir, "extensions")
self.userPrefs = os.path.join(self.profile_dir, "user.js")
# Public Methods
def set_preference(self, key, value):
"""
sets the preference that we want in the profile.
"""
self.default_preferences[key] = value
def add_extension(self, extension=WEBDRIVER_EXT):
self._install_extension(extension)
def update_preferences(self):
for key, value in FirefoxProfile.DEFAULT_PREFERENCES['frozen'].items():
self.default_preferences[key] = value
self._write_user_prefs(self.default_preferences)
# Properties
@property
def path(self):
"""
Gets the profile directory that is currently being used
"""
return self.profile_dir
@property
def port(self):
"""
Gets the port that WebDriver is working on
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port that WebDriver will be running on
"""
if not isinstance(port, int):
raise WebDriverException("Port needs to be an integer")
try:
port = int(port)
if port < 1 or port > 65535:
raise WebDriverException("Port number must be in the range 1..65535")
except (ValueError, TypeError):
raise WebDriverException("Port needs to be an integer")
self._port = port
self.set_preference("webdriver_firefox_port", self._port)
@property
def accept_untrusted_certs(self):
return self.default_preferences["webdriver_accept_untrusted_certs"]
@accept_untrusted_certs.setter
def accept_untrusted_certs(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_accept_untrusted_certs", value)
@property
def assume_untrusted_cert_issuer(self):
return self.default_preferences["webdriver_assume_untrusted_issuer"]
@assume_untrusted_cert_issuer.setter
def assume_untrusted_cert_issuer(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_assume_untrusted_issuer", value)
@property
def native_events_enabled(self):
return self.default_preferences['webdriver_enable_native_events']
@native_events_enabled.setter
def native_events_enabled(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_enable_native_events", value)
@property
def encoded(self):
"""
A zipped, base64 encoded string of profile directory
for use with remote WebDriver JSON wire protocol
"""
self.update_preferences()
fp = BytesIO()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
path_root = len(self.path) + 1 # account for trailing slash
for base, dirs, files in os.walk(self.path):
for fyle in files:
filename = os.path.join(base, fyle)
zipped.write(filename, filename[path_root:])
zipped.close()
return base64.b64encode(fp.getvalue()).decode('UTF-8')
def set_proxy(self, proxy):
import warnings
warnings.warn(
"This method has been deprecated. Please pass in the proxy object to the Driver Object",
DeprecationWarning)
if proxy is None:
raise ValueError("proxy can not be None")
if proxy.proxy_type is ProxyType.UNSPECIFIED:
return
self.set_preference("network.proxy.type", proxy.proxy_type['ff_value'])
if proxy.proxy_type is ProxyType.MANUAL:
self.set_preference("network.proxy.no_proxies_on", proxy.no_proxy)
self._set_manual_proxy_preference("ftp", proxy.ftp_proxy)
self._set_manual_proxy_preference("http", proxy.http_proxy)
self._set_manual_proxy_preference("ssl", proxy.ssl_proxy)
self._set_manual_proxy_preference("socks", proxy.socks_proxy)
elif proxy.proxy_type is ProxyType.PAC:
self.set_preference("network.proxy.autoconfig_url", proxy.proxy_autoconfig_url)
def _set_manual_proxy_preference(self, key, setting):
        if setting is None or setting == '':
return
host_details = setting.split(":")
self.set_preference("network.proxy.%s" % key, host_details[0])
if len(host_details) > 1:
self.set_preference("network.proxy.%s_port" % key, int(host_details[1]))
def _create_tempfolder(self):
"""
Creates a temp folder to store User.js and the extension
"""
return tempfile.mkdtemp()
def _write_user_prefs(self, user_prefs):
"""
writes the current user prefs dictionary to disk
"""
with open(self.userPrefs, "w") as f:
for key, value in user_prefs.items():
f.write('user_pref("%s", %s);\n' % (key, json.dumps(value)))
def _read_existing_userjs(self, userjs):
import warnings
PREF_RE = re.compile(r'user_pref\("(.*)",\s(.*)\)')
try:
with open(userjs) as f:
for usr in f:
matches = re.search(PREF_RE, usr)
try:
self.default_preferences[matches.group(1)] = json.loads(matches.group(2))
except Exception:
warnings.warn("(skipping) failed to json.loads existing preference: " +
matches.group(1) + matches.group(2))
except Exception:
            # The profile given hasn't had any changes made, i.e. no user.js
pass
def _install_extension(self, addon, unpack=True):
"""
Installs addon from a filepath, url
or directory of addons in the profile.
        - addon: url, absolute path to .xpi, or directory of addons
- unpack: whether to unpack unless specified otherwise in the install.rdf
"""
if addon == WEBDRIVER_EXT:
addon = os.path.join(os.path.dirname(__file__), WEBDRIVER_EXT)
tmpdir = None
xpifile = None
if addon.endswith('.xpi'):
tmpdir = tempfile.mkdtemp(suffix='.' + os.path.split(addon)[-1])
compressed_file = zipfile.ZipFile(addon, 'r')
for name in compressed_file.namelist():
if name.endswith('/'):
if not os.path.isdir(os.path.join(tmpdir, name)):
os.makedirs(os.path.join(tmpdir, name))
else:
if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
os.makedirs(os.path.dirname(os.path.join(tmpdir, name)))
data = compressed_file.read(name)
with open(os.path.join(tmpdir, name), 'wb') as f:
f.write(data)
xpifile = addon
addon = tmpdir
# determine the addon id
addon_details = self._addon_details(addon)
addon_id = addon_details.get('id')
assert addon_id, 'The addon id could not be found: %s' % addon
# copy the addon to the profile
extensions_path = os.path.join(self.profile_dir, 'extensions')
addon_path = os.path.join(extensions_path, addon_id)
if not unpack and not addon_details['unpack'] and xpifile:
if not os.path.exists(extensions_path):
os.makedirs(extensions_path)
shutil.copy(xpifile, addon_path + '.xpi')
else:
if not os.path.exists(addon_path):
shutil.copytree(addon, addon_path, symlinks=True)
# remove the temporary directory, if any
if tmpdir:
shutil.rmtree(tmpdir)
def _addon_details(self, addon_path):
"""
Returns a dictionary of details about the addon.
:param addon_path: path to the add-on directory or XPI
Returns::
{'id': u'[email protected]', # id of the addon
'version': u'1.4', # version of the addon
'name': u'Rainbow', # name of the addon
'unpack': False } # whether to unpack the addon
"""
details = {
'id': None,
'unpack': False,
'name': None,
'version': None
}
def get_namespace_id(doc, url):
attributes = doc.documentElement.attributes
namespace = ""
for i in range(attributes.length):
if attributes.item(i).value == url:
if ":" in attributes.item(i).name:
                        # If the namespace is not the default one remove 'xmlns:'
namespace = attributes.item(i).name.split(':')[1] + ":"
break
return namespace
def get_text(element):
"""Retrieve the text value of a given node"""
rc = []
for node in element.childNodes:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc).strip()
if not os.path.exists(addon_path):
raise IOError('Add-on path does not exist: %s' % addon_path)
try:
if zipfile.is_zipfile(addon_path):
            # Bug 944361 - We cannot use 'with' together with ZipFile because
            # it raises an exception in Python 2.6.
try:
compressed_file = zipfile.ZipFile(addon_path, 'r')
manifest = compressed_file.read('install.rdf')
finally:
compressed_file.close()
elif os.path.isdir(addon_path):
with open(os.path.join(addon_path, 'install.rdf'), 'r') as f:
manifest = f.read()
else:
raise IOError('Add-on path is neither an XPI nor a directory: %s' % addon_path)
except (IOError, KeyError) as e:
raise AddonFormatError(str(e), sys.exc_info()[2])
try:
doc = minidom.parseString(manifest)
# Get the namespaces abbreviations
em = get_namespace_id(doc, 'http://www.mozilla.org/2004/em-rdf#')
rdf = get_namespace_id(doc, 'http://www.w3.org/1999/02/22-rdf-syntax-ns#')
description = doc.getElementsByTagName(rdf + 'Description').item(0)
if description is None:
description = doc.getElementsByTagName('Description').item(0)
for node in description.childNodes:
# Remove the namespace prefix from the tag for comparison
entry = node.nodeName.replace(em, "")
if entry in details.keys():
details.update({entry: get_text(node)})
if details.get('id') is None:
for i in range(description.attributes.length):
attribute = description.attributes.item(i)
if attribute.name == em + 'id':
details.update({'id': attribute.value})
except Exception as e:
raise AddonFormatError(str(e), sys.exc_info()[2])
# turn unpack into a true/false value
if isinstance(details['unpack'], str):
details['unpack'] = details['unpack'].lower() == 'true'
# If no ID is set, the add-on is invalid
if details.get('id') is None:
raise AddonFormatError('Add-on id could not be found.')
return details
| 1 | 15,026 | The id is not mandatory for web extensions. Could you update this to support a web extension without an id? | SeleniumHQ-selenium | js |
@@ -31,11 +31,6 @@ import (
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
-const (
- serviceName = "pubsub.googleapis.com"
- methodName = "google.pubsub.v1.Publisher.CreateTopic"
-)
-
func CloudAuditLogsSourceWithTestImpl(t *testing.T, authConfig lib.AuthConfig) {
project := os.Getenv(lib.ProwProjectKey)
| 1 | /*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"encoding/json"
"fmt"
"os"
"testing"
"time"
"github.com/google/knative-gcp/test/e2e/lib"
"knative.dev/pkg/test/helpers"
// The following line to load the gcp plugin (only required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
const (
serviceName = "pubsub.googleapis.com"
methodName = "google.pubsub.v1.Publisher.CreateTopic"
)
func CloudAuditLogsSourceWithTestImpl(t *testing.T, authConfig lib.AuthConfig) {
project := os.Getenv(lib.ProwProjectKey)
auditlogsName := helpers.AppendRandomString("auditlogs-e2e-test")
targetName := helpers.AppendRandomString(auditlogsName + "-target")
topicName := helpers.AppendRandomString(auditlogsName + "-topic")
resourceName := fmt.Sprintf("projects/%s/topics/%s", project, topicName)
client := lib.Setup(t, true, authConfig.WorkloadIdentity)
defer lib.TearDown(client)
// Create a target Job to receive the events.
lib.MakeAuditLogsJobOrDie(client, methodName, project, resourceName, serviceName, targetName)
// Create the CloudAuditLogsSource.
lib.MakeAuditLogsOrDie(client,
auditlogsName,
methodName,
project,
resourceName,
serviceName,
targetName,
authConfig.PubsubServiceAccount,
)
client.Core.WaitForResourceReadyOrFail(auditlogsName, lib.CloudAuditLogsSourceTypeMeta)
// Audit logs source misses the topic which gets created shortly after the source becomes ready. Need to wait for a few seconds.
// Tried with 45 seconds but the test has been quite flaky.
time.Sleep(90 * time.Second)
topicName, deleteTopic := lib.MakeTopicWithNameOrDie(t, topicName)
defer deleteTopic()
msg, err := client.WaitUntilJobDone(client.Namespace, targetName)
if err != nil {
t.Error(err)
}
t.Logf("Last term message => %s", msg)
if msg != "" {
out := &lib.TargetOutput{}
if err := json.Unmarshal([]byte(msg), out); err != nil {
t.Error(err)
}
if !out.Success {
// Log the output cloudauditlogssource pods.
if logs, err := client.LogsFor(client.Namespace, auditlogsName, lib.CloudAuditLogsSourceTypeMeta); err != nil {
t.Error(err)
} else {
t.Logf("cloudauditlogssource: %+v", logs)
}
// Log the output of the target job pods.
if logs, err := client.LogsFor(client.Namespace, targetName, lib.JobTypeMeta); err != nil {
t.Error(err)
} else {
t.Logf("job: %s\n", logs)
}
t.Fail()
}
}
}
| 1 | 13,533 | I actually prefer them to be defined here. Client who calls `lib` should define the args, not `lib` itself. | google-knative-gcp | go |
@@ -97,9 +97,11 @@ func (t *tty) recvtty(process *libcontainer.Process, socket *os.File) error {
// set raw mode to stdin and also handle interrupt
stdin, err := console.ConsoleFromFile(os.Stdin)
if err != nil {
+ epollConsole.Close()
return err
}
if err := stdin.SetRaw(); err != nil {
+ epollConsole.Close()
return fmt.Errorf("failed to set the terminal from the stdin: %v", err)
}
go handleInterrupt(stdin) | 1 | // +build linux
package main
import (
"fmt"
"io"
"os"
"os/signal"
"sync"
"github.com/containerd/console"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/utils"
)
type tty struct {
epoller *console.Epoller
console *console.EpollConsole
stdin console.Console
closers []io.Closer
postStart []io.Closer
wg sync.WaitGroup
consoleC chan error
}
func (t *tty) copyIO(w io.Writer, r io.ReadCloser) {
defer t.wg.Done()
io.Copy(w, r)
r.Close()
}
// setup pipes for the process so that advanced features like c/r are able to easily checkpoint
// and restore the process's IO without depending on a host specific path or device
func setupProcessPipes(p *libcontainer.Process, rootuid, rootgid int) (*tty, error) {
i, err := p.InitializeIO(rootuid, rootgid)
if err != nil {
return nil, err
}
t := &tty{
closers: []io.Closer{
i.Stdin,
i.Stdout,
i.Stderr,
},
}
// add the process's io to the post start closers if they support close
for _, cc := range []interface{}{
p.Stdin,
p.Stdout,
p.Stderr,
} {
if c, ok := cc.(io.Closer); ok {
t.postStart = append(t.postStart, c)
}
}
go func() {
io.Copy(i.Stdin, os.Stdin)
i.Stdin.Close()
}()
t.wg.Add(2)
go t.copyIO(os.Stdout, i.Stdout)
go t.copyIO(os.Stderr, i.Stderr)
return t, nil
}
func inheritStdio(process *libcontainer.Process) error {
process.Stdin = os.Stdin
process.Stdout = os.Stdout
process.Stderr = os.Stderr
return nil
}
func (t *tty) recvtty(process *libcontainer.Process, socket *os.File) error {
f, err := utils.RecvFd(socket)
if err != nil {
return err
}
cons, err := console.ConsoleFromFile(f)
if err != nil {
return err
}
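	// ONLCR maps "\n" to "\r\n" on output; clearing it keeps the console output raw.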
console.ClearONLCR(cons.Fd())
epoller, err := console.NewEpoller()
if err != nil {
return err
}
epollConsole, err := epoller.Add(cons)
if err != nil {
return err
}
go epoller.Wait()
go io.Copy(epollConsole, os.Stdin)
t.wg.Add(1)
go t.copyIO(os.Stdout, epollConsole)
// set raw mode to stdin and also handle interrupt
stdin, err := console.ConsoleFromFile(os.Stdin)
if err != nil {
return err
}
if err := stdin.SetRaw(); err != nil {
return fmt.Errorf("failed to set the terminal from the stdin: %v", err)
}
go handleInterrupt(stdin)
t.epoller = epoller
t.stdin = stdin
t.console = epollConsole
t.closers = []io.Closer{epollConsole}
return nil
}
func handleInterrupt(c console.Console) {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, os.Interrupt)
<-sigchan
c.Reset()
os.Exit(0)
}
func (t *tty) waitConsole() error {
if t.consoleC != nil {
return <-t.consoleC
}
return nil
}
// ClosePostStart closes any fds that are provided to the container and dup2'd
// so that we no longer have copy in our process.
func (t *tty) ClosePostStart() error {
for _, c := range t.postStart {
c.Close()
}
return nil
}
// Close closes all open fds for the tty and/or restores the original
// stdin state to what it was prior to the container execution
func (t *tty) Close() error {
// ensure that our side of the fds are always closed
for _, c := range t.postStart {
c.Close()
}
// the process is gone at this point, shutting down the console if we have
// one and wait for all IO to be finished
if t.console != nil && t.epoller != nil {
t.console.Shutdown(t.epoller.CloseConsole)
}
t.wg.Wait()
for _, c := range t.closers {
c.Close()
}
if t.stdin != nil {
t.stdin.Reset()
}
return nil
}
func (t *tty) resize() error {
if t.console == nil {
return nil
}
return t.console.ResizeFrom(console.Current())
}
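
The patch above adds an explicit epollConsole.Close() on each early-return path in recvtty; a common alternative in Go is a single deferred cleanup guarded by a named return error, which is also what the review note on this patch suggests. A minimal, self-contained sketch of that pattern (the resource type and configure step are made up for illustration, not runc or containerd code):

package main

import (
	"errors"
	"fmt"
)

// resource stands in for something like the epoll console set up in recvtty;
// it is purely illustrative.
type resource struct{ name string }

func (r *resource) Close() { fmt.Println("closed", r.name) }

func configure(r *resource, fail bool) error {
	if fail {
		return errors.New("set raw mode failed")
	}
	return nil
}

// setup shows the defer-plus-named-return-error trick: the deferred guard
// reads the final value of err, so every early error return taken after the
// resource exists releases it exactly once, with no repeated Close calls.
func setup(fail bool) (err error) {
	r := &resource{name: "epollConsole"}
	defer func() {
		if err != nil {
			r.Close()
		}
	}()

	if err = configure(r, fail); err != nil {
		return fmt.Errorf("configure console: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(setup(true))  // the deferred guard closes the resource on this path
	fmt.Println(setup(false)) // success: the resource stays open for the caller
}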
| 1 | 16,829 | I think this should've been done with the `defer`-named-return trick. I'll submit a PR for it. | opencontainers-runc | go |
@@ -1048,6 +1048,12 @@ stats (config.stat_config),
vote_uniquer (block_uniquer),
startup_time (std::chrono::steady_clock::now ())
{
+ if (config.websocket_config.enabled)
+ {
+ websocket_server = std::make_shared<nano::websocket::listener> (*this, nano::tcp_endpoint (boost::asio::ip::address_v6::any (), config.websocket_config.port));
+ this->websocket_server->run ();
+ }
+
wallets.observer = [this](bool active) {
observers.wallet.notify (active);
}; | 1 | #include <nano/node/node.hpp>
#include <nano/lib/interface.h>
#include <nano/lib/timer.hpp>
#include <nano/lib/utility.hpp>
#include <nano/node/common.hpp>
#include <nano/node/rpc.hpp>
#include <algorithm>
#include <cstdlib>
#include <future>
#include <sstream>
#include <boost/polymorphic_cast.hpp>
#include <boost/property_tree/json_parser.hpp>
double constexpr nano::node::price_max;
double constexpr nano::node::free_cutoff;
size_t constexpr nano::active_transactions::max_broadcast_queue;
size_t constexpr nano::block_arrival::arrival_size_min;
std::chrono::seconds constexpr nano::block_arrival::arrival_time_min;
namespace nano
{
extern unsigned char nano_bootstrap_weights_live[];
extern size_t nano_bootstrap_weights_live_size;
extern unsigned char nano_bootstrap_weights_beta[];
extern size_t nano_bootstrap_weights_beta_size;
}
nano::network::network (nano::node & node_a, uint16_t port_a) :
buffer_container (node_a.stats, nano::network::buffer_size, 4096), // 2Mb receive buffer
resolver (node_a.io_ctx),
node (node_a),
udp_channels (node_a, port_a),
disconnect_observer ([]() {})
{
boost::thread::attributes attrs;
nano::thread_attributes::set (attrs);
for (size_t i = 0; i < node.config.network_threads; ++i)
{
packet_processing_threads.push_back (boost::thread (attrs, [this]() {
nano::thread_role::set (nano::thread_role::name::packet_processing);
try
{
udp_channels.process_packets ();
}
catch (boost::system::error_code & ec)
{
this->node.logger.try_log (FATAL_LOG_PREFIX, ec.message ());
release_assert (false);
}
catch (std::error_code & ec)
{
this->node.logger.try_log (FATAL_LOG_PREFIX, ec.message ());
release_assert (false);
}
catch (std::runtime_error & err)
{
this->node.logger.try_log (FATAL_LOG_PREFIX, err.what ());
release_assert (false);
}
catch (...)
{
this->node.logger.try_log (FATAL_LOG_PREFIX, "Unknown exception");
release_assert (false);
}
if (this->node.config.logging.network_packet_logging ())
{
this->node.logger.try_log ("Exiting packet processing thread");
}
}));
}
}
nano::network::~network ()
{
for (auto & thread : packet_processing_threads)
{
thread.join ();
}
}
void nano::network::start ()
{
ongoing_cleanup ();
udp_channels.start ();
}
void nano::network::stop ()
{
udp_channels.stop ();
resolver.cancel ();
buffer_container.stop ();
}
void nano::network::send_keepalive (nano::transport::channel const & channel_a)
{
nano::keepalive message;
udp_channels.random_fill (message.peers);
channel_a.send (message);
}
void nano::node::keepalive (std::string const & address_a, uint16_t port_a, bool preconfigured_peer_a)
{
auto node_l (shared_from_this ());
network.resolver.async_resolve (boost::asio::ip::udp::resolver::query (address_a, std::to_string (port_a)), [node_l, address_a, port_a, preconfigured_peer_a](boost::system::error_code const & ec, boost::asio::ip::udp::resolver::iterator i_a) {
if (!ec)
{
for (auto i (i_a), n (boost::asio::ip::udp::resolver::iterator{}); i != n; ++i)
{
auto endpoint (nano::transport::map_endpoint_to_v6 (i->endpoint ()));
nano::transport::channel_udp channel (node_l->network.udp_channels, endpoint);
node_l->network.send_keepalive (channel);
}
}
else
{
node_l->logger.try_log (boost::str (boost::format ("Error resolving address: %1%:%2%: %3%") % address_a % port_a % ec.message ()));
}
});
}
void nano::network::send_node_id_handshake (nano::endpoint const & endpoint_a, boost::optional<nano::uint256_union> const & query, boost::optional<nano::uint256_union> const & respond_to)
{
boost::optional<std::pair<nano::account, nano::signature>> response (boost::none);
if (respond_to)
{
response = std::make_pair (node.node_id.pub, nano::sign_message (node.node_id.prv, node.node_id.pub, *respond_to));
assert (!nano::validate_message (response->first, *respond_to, response->second));
}
nano::node_id_handshake message (query, response);
if (node.config.logging.network_node_id_handshake_logging ())
{
node.logger.try_log (boost::str (boost::format ("Node ID handshake sent with node ID %1% to %2%: query %3%, respond_to %4% (signature %5%)") % node.node_id.pub.to_account () % endpoint_a % (query ? query->to_string () : std::string ("[none]")) % (respond_to ? respond_to->to_string () : std::string ("[none]")) % (response ? response->second.to_string () : std::string ("[none]"))));
}
nano::transport::channel_udp channel (udp_channels, endpoint_a);
channel.send (message);
}
template <typename T>
bool confirm_block (nano::transaction const & transaction_a, nano::node & node_a, T & list_a, std::shared_ptr<nano::block> block_a, bool also_publish)
{
bool result (false);
if (node_a.config.enable_voting)
{
auto hash (block_a->hash ());
// Search in cache
auto votes (node_a.votes_cache.find (hash));
if (votes.empty ())
{
// Generate new vote
node_a.wallets.foreach_representative (transaction_a, [&result, &list_a, &node_a, &transaction_a, &hash](nano::public_key const & pub_a, nano::raw_key const & prv_a) {
result = true;
auto vote (node_a.store.vote_generate (transaction_a, pub_a, prv_a, std::vector<nano::block_hash> (1, hash)));
nano::confirm_ack confirm (vote);
auto vote_bytes = confirm.to_bytes ();
for (auto j (list_a.begin ()), m (list_a.end ()); j != m; ++j)
{
j->get ().send_buffer (vote_bytes, nano::stat::detail::confirm_ack);
}
node_a.votes_cache.add (vote);
});
}
else
{
// Send from cache
for (auto & vote : votes)
{
nano::confirm_ack confirm (vote);
auto vote_bytes = confirm.to_bytes ();
for (auto j (list_a.begin ()), m (list_a.end ()); j != m; ++j)
{
j->get ().send_buffer (vote_bytes, nano::stat::detail::confirm_ack);
}
}
}
// Republish if required
if (also_publish)
{
nano::publish publish (block_a);
auto publish_bytes (publish.to_bytes ());
for (auto j (list_a.begin ()), m (list_a.end ()); j != m; ++j)
{
j->get ().send_buffer (publish_bytes, nano::stat::detail::publish);
}
}
}
return result;
}
bool confirm_block (nano::transaction const & transaction_a, nano::node & node_a, nano::transport::channel const & channel_a, std::shared_ptr<nano::block> block_a, bool also_publish)
{
std::array<std::reference_wrapper<nano::transport::channel const>, 1> endpoints = { channel_a };
auto result (confirm_block (transaction_a, node_a, endpoints, std::move (block_a), also_publish));
return result;
}
void nano::network::confirm_hashes (nano::transaction const & transaction_a, nano::transport::channel const & channel_a, std::vector<nano::block_hash> blocks_bundle_a)
{
if (node.config.enable_voting)
{
node.wallets.foreach_representative (transaction_a, [this, &blocks_bundle_a, &channel_a, &transaction_a](nano::public_key const & pub_a, nano::raw_key const & prv_a) {
auto vote (this->node.store.vote_generate (transaction_a, pub_a, prv_a, blocks_bundle_a));
nano::confirm_ack confirm (vote);
std::shared_ptr<std::vector<uint8_t>> bytes (new std::vector<uint8_t>);
{
nano::vectorstream stream (*bytes);
confirm.serialize (stream);
}
channel_a.send_buffer (bytes, nano::stat::detail::confirm_ack);
this->node.votes_cache.add (vote);
});
}
}
bool nano::network::send_votes_cache (nano::transport::channel const & channel_a, nano::block_hash const & hash_a)
{
// Search in cache
auto votes (node.votes_cache.find (hash_a));
// Send from cache
for (auto & vote : votes)
{
nano::confirm_ack confirm (vote);
auto vote_bytes = confirm.to_bytes ();
channel_a.send_buffer (vote_bytes, nano::stat::detail::confirm_ack);
}
// Returns true if votes were sent
bool result (!votes.empty ());
return result;
}
void nano::network::flood_message (nano::message const & message_a)
{
auto list (node.network.udp_channels.list_fanout ());
for (auto i (list.begin ()), n (list.end ()); i != n; ++i)
{
(*i)->send (message_a);
}
}
void nano::network::flood_block_batch (std::deque<std::shared_ptr<nano::block>> blocks_a, unsigned delay_a)
{
auto block (blocks_a.front ());
blocks_a.pop_front ();
flood_block (block);
if (!blocks_a.empty ())
{
std::weak_ptr<nano::node> node_w (node.shared ());
node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a + std::rand () % delay_a), [node_w, blocks_a, delay_a]() {
if (auto node_l = node_w.lock ())
{
node_l->network.flood_block_batch (blocks_a, delay_a);
}
});
}
}
void nano::network::broadcast_confirm_req (std::shared_ptr<nano::block> block_a)
{
auto list (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> (node.rep_crawler.representative_endpoints (std::numeric_limits<size_t>::max ())));
if (list->empty () || node.rep_crawler.total_weight () < node.config.online_weight_minimum.number ())
{
// broadcast request to all peers (with max limit 2 * sqrt (peers count))
auto peers (node.network.udp_channels.list (std::min (static_cast<size_t> (100), 2 * node.network.size_sqrt ())));
list->clear ();
for (auto & peer : peers)
{
list->push_back (peer);
}
}
/*
* In either case (broadcasting to all representatives, or broadcasting to
* all peers because there are not enough connected representatives),
* limit each instance to a single random up-to-32 selection. The invoker
* of "broadcast_confirm_req" will be responsible for calling it again
* if the votes for a block have not arrived in time.
*/
const size_t max_endpoints = 32;
random_pool::shuffle (list->begin (), list->end ());
if (list->size () > max_endpoints)
{
list->erase (list->begin () + max_endpoints, list->end ());
}
broadcast_confirm_req_base (block_a, list, 0);
}
void nano::network::broadcast_confirm_req_base (std::shared_ptr<nano::block> block_a, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>> endpoints_a, unsigned delay_a, bool resumption)
{
const size_t max_reps = 10;
if (!resumption && node.config.logging.network_logging ())
{
node.logger.try_log (boost::str (boost::format ("Broadcasting confirm req for block %1% to %2% representatives") % block_a->hash ().to_string () % endpoints_a->size ()));
}
auto count (0);
while (!endpoints_a->empty () && count < max_reps)
{
nano::confirm_req req (block_a);
endpoints_a->back ()->send (req);
endpoints_a->pop_back ();
count++;
}
if (!endpoints_a->empty ())
{
delay_a += std::rand () % broadcast_interval_ms;
std::weak_ptr<nano::node> node_w (node.shared ());
node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a), [node_w, block_a, endpoints_a, delay_a]() {
if (auto node_l = node_w.lock ())
{
node_l->network.broadcast_confirm_req_base (block_a, endpoints_a, delay_a, true);
}
});
}
}
void nano::network::broadcast_confirm_req_batch (std::unordered_map<std::shared_ptr<nano::transport::channel>, std::vector<std::pair<nano::block_hash, nano::block_hash>>> request_bundle_a, unsigned delay_a, bool resumption)
{
const size_t max_reps = 10;
if (!resumption && node.config.logging.network_logging ())
{
node.logger.try_log (boost::str (boost::format ("Broadcasting batch confirm req to %1% representatives") % request_bundle_a.size ()));
}
auto count (0);
while (!request_bundle_a.empty () && count < max_reps)
{
auto j (request_bundle_a.begin ());
count++;
std::vector<std::pair<nano::block_hash, nano::block_hash>> roots_hashes;
// Limit max request size hash + root to 6 pairs
while (roots_hashes.size () <= confirm_req_hashes_max && !j->second.empty ())
{
roots_hashes.push_back (j->second.back ());
j->second.pop_back ();
}
nano::confirm_req req (roots_hashes);
j->first->send (req);
if (j->second.empty ())
{
request_bundle_a.erase (j);
}
}
if (!request_bundle_a.empty ())
{
std::weak_ptr<nano::node> node_w (node.shared ());
node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a), [node_w, request_bundle_a, delay_a]() {
if (auto node_l = node_w.lock ())
{
node_l->network.broadcast_confirm_req_batch (request_bundle_a, delay_a + 50, true);
}
});
}
}
void nano::network::broadcast_confirm_req_batch (std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>> deque_a, unsigned delay_a)
{
auto pair (deque_a.front ());
deque_a.pop_front ();
auto block (pair.first);
// confirm_req to representatives
auto endpoints (pair.second);
if (!endpoints->empty ())
{
broadcast_confirm_req_base (block, endpoints, delay_a);
}
/* Continue while blocks remain
Broadcast with random delay between delay_a & 2*delay_a */
if (!deque_a.empty ())
{
std::weak_ptr<nano::node> node_w (node.shared ());
node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a + std::rand () % delay_a), [node_w, deque_a, delay_a]() {
if (auto node_l = node_w.lock ())
{
node_l->network.broadcast_confirm_req_batch (deque_a, delay_a);
}
});
}
}
namespace
{
class network_message_visitor : public nano::message_visitor
{
public:
network_message_visitor (nano::node & node_a, std::shared_ptr<nano::transport::channel> channel_a) :
node (node_a),
channel (channel_a)
{
}
void keepalive (nano::keepalive const & message_a) override
{
if (node.config.logging.network_keepalive_logging ())
{
node.logger.try_log (boost::str (boost::format ("Received keepalive message from %1%") % channel->to_string ()));
}
node.stats.inc (nano::stat::type::message, nano::stat::detail::keepalive, nano::stat::dir::in);
node.network.merge_peers (message_a.peers);
}
void publish (nano::publish const & message_a) override
{
if (node.config.logging.network_message_logging ())
{
node.logger.try_log (boost::str (boost::format ("Publish message from %1% for %2%") % channel->to_string () % message_a.block->hash ().to_string ()));
}
node.stats.inc (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in);
if (!node.block_processor.full ())
{
node.process_active (message_a.block);
}
node.active.publish (message_a.block);
}
void confirm_req (nano::confirm_req const & message_a) override
{
if (node.config.logging.network_message_logging ())
{
if (!message_a.roots_hashes.empty ())
{
node.logger.try_log (boost::str (boost::format ("Confirm_req message from %1% for hashes:roots %2%") % channel->to_string () % message_a.roots_string ()));
}
else
{
node.logger.try_log (boost::str (boost::format ("Confirm_req message from %1% for %2%") % channel->to_string () % message_a.block->hash ().to_string ()));
}
}
node.stats.inc (nano::stat::type::message, nano::stat::detail::confirm_req, nano::stat::dir::in);
// Don't load nodes with disabled voting
if (node.config.enable_voting && node.wallets.reps_count)
{
if (message_a.block != nullptr)
{
auto hash (message_a.block->hash ());
if (!node.network.send_votes_cache (*channel, hash))
{
auto transaction (node.store.tx_begin_read ());
auto successor (node.ledger.successor (transaction, nano::uint512_union (message_a.block->previous (), message_a.block->root ())));
if (successor != nullptr)
{
auto same_block (successor->hash () == hash);
confirm_block (transaction, node, std::cref (*channel), std::move (successor), !same_block);
}
}
}
else if (!message_a.roots_hashes.empty ())
{
auto transaction (node.store.tx_begin_read ());
std::vector<nano::block_hash> blocks_bundle;
for (auto & root_hash : message_a.roots_hashes)
{
if (!node.network.send_votes_cache (*channel, root_hash.first) && node.store.block_exists (transaction, root_hash.first))
{
blocks_bundle.push_back (root_hash.first);
}
else
{
nano::block_hash successor (0);
// Search for block root
successor = node.store.block_successor (transaction, root_hash.second);
// Search for account root
if (successor.is_zero () && node.store.account_exists (transaction, root_hash.second))
{
nano::account_info info;
auto error (node.store.account_get (transaction, root_hash.second, info));
assert (!error);
successor = info.open_block;
}
if (!successor.is_zero ())
{
if (!node.network.send_votes_cache (*channel, successor))
{
blocks_bundle.push_back (successor);
}
auto successor_block (node.store.block_get (transaction, successor));
assert (successor_block != nullptr);
nano::publish publish (successor_block);
channel->send (publish);
}
}
}
if (!blocks_bundle.empty ())
{
node.network.confirm_hashes (transaction, *channel, blocks_bundle);
}
}
}
}
void confirm_ack (nano::confirm_ack const & message_a) override
{
if (node.config.logging.network_message_logging ())
{
node.logger.try_log (boost::str (boost::format ("Received confirm_ack message from %1% for %2%sequence %3%") % channel->to_string () % message_a.vote->hashes_string () % std::to_string (message_a.vote->sequence)));
}
node.stats.inc (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::in);
for (auto & vote_block : message_a.vote->blocks)
{
if (!vote_block.which ())
{
auto block (boost::get<std::shared_ptr<nano::block>> (vote_block));
if (!node.block_processor.full ())
{
node.process_active (block);
}
node.active.publish (block);
}
}
node.vote_processor.vote (message_a.vote, channel);
}
void bulk_pull (nano::bulk_pull const &) override
{
assert (false);
}
void bulk_pull_account (nano::bulk_pull_account const &) override
{
assert (false);
}
void bulk_push (nano::bulk_push const &) override
{
assert (false);
}
void frontier_req (nano::frontier_req const &) override
{
assert (false);
}
void node_id_handshake (nano::node_id_handshake const & message_a) override
{
node.stats.inc (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in);
}
nano::node & node;
std::shared_ptr<nano::transport::channel> channel;
};
}
// Send keepalives to all the peers we've been notified of
void nano::network::merge_peers (std::array<nano::endpoint, 8> const & peers_a)
{
for (auto i (peers_a.begin ()), j (peers_a.end ()); i != j; ++i)
{
if (!udp_channels.reachout (*i, node.config.allow_local_peers))
{
nano::transport::channel_udp channel (node.network.udp_channels, *i);
send_keepalive (channel);
}
}
}
bool nano::operation::operator> (nano::operation const & other_a) const
{
return wakeup > other_a.wakeup;
}
nano::alarm::alarm (boost::asio::io_context & io_ctx_a) :
io_ctx (io_ctx_a),
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::alarm);
run ();
})
{
}
nano::alarm::~alarm ()
{
add (std::chrono::steady_clock::now (), nullptr);
thread.join ();
}
void nano::alarm::run ()
{
std::unique_lock<std::mutex> lock (mutex);
auto done (false);
while (!done)
{
if (!operations.empty ())
{
auto & operation (operations.top ());
if (operation.function)
{
if (operation.wakeup <= std::chrono::steady_clock::now ())
{
io_ctx.post (operation.function);
operations.pop ();
}
else
{
auto wakeup (operation.wakeup);
condition.wait_until (lock, wakeup);
}
}
else
{
done = true;
}
}
else
{
condition.wait (lock);
}
}
}
void nano::alarm::add (std::chrono::steady_clock::time_point const & wakeup_a, std::function<void()> const & operation)
{
{
std::lock_guard<std::mutex> lock (mutex);
operations.push (nano::operation ({ wakeup_a, operation }));
}
condition.notify_all ();
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (alarm & alarm, const std::string & name)
{
auto composite = std::make_unique<seq_con_info_composite> (name);
size_t count = 0;
{
std::lock_guard<std::mutex> guard (alarm.mutex);
count = alarm.operations.size ();
}
auto sizeof_element = sizeof (decltype (alarm.operations)::value_type);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "operations", count, sizeof_element }));
return composite;
}
}
nano::node_init::node_init () :
block_store_init (false),
wallet_init (false)
{
}
bool nano::node_init::error ()
{
return block_store_init || wallet_init || wallets_store_init;
}
nano::vote_processor::vote_processor (nano::node & node_a) :
node (node_a),
started (false),
stopped (false),
active (false),
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::vote_processing);
process_loop ();
})
{
std::unique_lock<std::mutex> lock (mutex);
while (!started)
{
condition.wait (lock);
}
}
void nano::vote_processor::process_loop ()
{
std::chrono::steady_clock::time_point start_time, end_time;
std::chrono::steady_clock::duration elapsed_time;
std::chrono::milliseconds elapsed_time_ms;
uint64_t elapsed_time_ms_int;
bool log_this_iteration;
std::unique_lock<std::mutex> lock (mutex);
started = true;
lock.unlock ();
condition.notify_all ();
lock.lock ();
while (!stopped)
{
if (!votes.empty ())
{
std::deque<std::pair<std::shared_ptr<nano::vote>, std::shared_ptr<nano::transport::channel>>> votes_l;
votes_l.swap (votes);
log_this_iteration = false;
if (node.config.logging.network_logging () && votes_l.size () > 50)
{
/*
* Only log the timing information for this iteration if
* there are a sufficient number of items for it to be relevant
*/
log_this_iteration = true;
start_time = std::chrono::steady_clock::now ();
}
active = true;
lock.unlock ();
verify_votes (votes_l);
{
std::unique_lock<std::mutex> active_single_lock (node.active.mutex);
auto transaction (node.store.tx_begin_read ());
uint64_t count (1);
for (auto & i : votes_l)
{
vote_blocking (transaction, i.first, i.second, true);
// Free active_transactions mutex each 100 processed votes
if (count % 100 == 0)
{
active_single_lock.unlock ();
active_single_lock.lock ();
}
count++;
}
}
lock.lock ();
active = false;
lock.unlock ();
condition.notify_all ();
lock.lock ();
if (log_this_iteration)
{
end_time = std::chrono::steady_clock::now ();
elapsed_time = end_time - start_time;
elapsed_time_ms = std::chrono::duration_cast<std::chrono::milliseconds> (elapsed_time);
elapsed_time_ms_int = elapsed_time_ms.count ();
if (elapsed_time_ms_int >= 100)
{
/*
* If the time spent was less than 100ms then
* the results are probably not useful as well,
* so don't spam the logs.
*/
node.logger.try_log (boost::str (boost::format ("Processed %1% votes in %2% milliseconds (rate of %3% votes per second)") % votes_l.size () % elapsed_time_ms_int % ((votes_l.size () * 1000ULL) / elapsed_time_ms_int)));
}
}
}
else
{
condition.wait (lock);
}
}
}
void nano::vote_processor::vote (std::shared_ptr<nano::vote> vote_a, std::shared_ptr<nano::transport::channel> channel_a)
{
std::unique_lock<std::mutex> lock (mutex);
if (!stopped)
{
bool process (false);
		/* Random early detection levels
Always process votes for test network (process = true)
Stop processing with max 144 * 1024 votes */
if (!node.network_params.is_test_network ())
{
// Level 0 (< 0.1%)
if (votes.size () < 96 * 1024)
{
process = true;
}
// Level 1 (0.1-1%)
else if (votes.size () < 112 * 1024)
{
process = (representatives_1.find (vote_a->account) != representatives_1.end ());
}
// Level 2 (1-5%)
else if (votes.size () < 128 * 1024)
{
process = (representatives_2.find (vote_a->account) != representatives_2.end ());
}
// Level 3 (> 5%)
else if (votes.size () < 144 * 1024)
{
process = (representatives_3.find (vote_a->account) != representatives_3.end ());
}
}
else
{
// Process for test network
process = true;
}
if (process)
{
votes.push_back (std::make_pair (vote_a, channel_a));
lock.unlock ();
condition.notify_all ();
lock.lock ();
}
else
{
node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_overflow);
}
}
}
void nano::vote_processor::verify_votes (std::deque<std::pair<std::shared_ptr<nano::vote>, std::shared_ptr<nano::transport::channel>>> & votes_a)
{
auto size (votes_a.size ());
std::vector<unsigned char const *> messages;
messages.reserve (size);
std::vector<nano::uint256_union> hashes;
hashes.reserve (size);
std::vector<size_t> lengths (size, sizeof (nano::uint256_union));
std::vector<unsigned char const *> pub_keys;
pub_keys.reserve (size);
std::vector<unsigned char const *> signatures;
signatures.reserve (size);
std::vector<int> verifications;
verifications.resize (size);
for (auto & vote : votes_a)
{
hashes.push_back (vote.first->hash ());
messages.push_back (hashes.back ().bytes.data ());
pub_keys.push_back (vote.first->account.bytes.data ());
signatures.push_back (vote.first->signature.bytes.data ());
}
nano::signature_check_set check = { size, messages.data (), lengths.data (), pub_keys.data (), signatures.data (), verifications.data () };
node.checker.verify (check);
std::remove_reference_t<decltype (votes_a)> result;
auto i (0);
for (auto & vote : votes_a)
{
assert (verifications[i] == 1 || verifications[i] == 0);
if (verifications[i] == 1)
{
result.push_back (vote);
}
++i;
}
votes_a.swap (result);
}
// node.active.mutex lock required
nano::vote_code nano::vote_processor::vote_blocking (nano::transaction const & transaction_a, std::shared_ptr<nano::vote> vote_a, std::shared_ptr<nano::transport::channel> channel_a, bool validated)
{
assert (!node.active.mutex.try_lock ());
auto result (nano::vote_code::invalid);
if (validated || !vote_a->validate ())
{
auto max_vote (node.store.vote_max (transaction_a, vote_a));
result = nano::vote_code::replay;
if (!node.active.vote (vote_a, true))
{
result = nano::vote_code::vote;
}
switch (result)
{
case nano::vote_code::vote:
node.observers.vote.notify (transaction_a, vote_a, channel_a);
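				// No break here: a fresh vote (vote_code::vote) also falls through to the replay-assist check below.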
case nano::vote_code::replay:
// This tries to assist rep nodes that have lost track of their highest sequence number by replaying our highest known vote back to them
// Only do this if the sequence number is significantly different to account for network reordering
// Amplify attack considerations: We're sending out a confirm_ack in response to a confirm_ack for no net traffic increase
if (max_vote->sequence > vote_a->sequence + 10000)
{
nano::confirm_ack confirm (max_vote);
channel_a->send_buffer (confirm.to_bytes (), nano::stat::detail::confirm_ack);
}
break;
case nano::vote_code::invalid:
assert (false);
break;
}
}
std::string status;
switch (result)
{
case nano::vote_code::invalid:
status = "Invalid";
node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_invalid);
break;
case nano::vote_code::replay:
status = "Replay";
node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_replay);
break;
case nano::vote_code::vote:
status = "Vote";
node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_valid);
break;
}
if (node.config.logging.vote_logging ())
{
node.logger.try_log (boost::str (boost::format ("Vote from: %1% sequence: %2% block(s): %3%status: %4%") % vote_a->account.to_account () % std::to_string (vote_a->sequence) % vote_a->hashes_string () % status));
}
return result;
}
void nano::vote_processor::stop ()
{
{
std::lock_guard<std::mutex> lock (mutex);
stopped = true;
}
condition.notify_all ();
if (thread.joinable ())
{
thread.join ();
}
}
void nano::vote_processor::flush ()
{
std::unique_lock<std::mutex> lock (mutex);
while (active || !votes.empty ())
{
condition.wait (lock);
}
}
void nano::vote_processor::calculate_weights ()
{
std::unique_lock<std::mutex> lock (mutex);
if (!stopped)
{
representatives_1.clear ();
representatives_2.clear ();
representatives_3.clear ();
auto supply (node.online_reps.online_stake ());
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.representation_begin (transaction)), n (node.store.representation_end ()); i != n; ++i)
{
nano::account representative (i->first);
auto weight (node.ledger.weight (transaction, representative));
if (weight > supply / 1000) // 0.1% or above (level 1)
{
representatives_1.insert (representative);
if (weight > supply / 100) // 1% or above (level 2)
{
representatives_2.insert (representative);
if (weight > supply / 20) // 5% or above (level 3)
{
representatives_3.insert (representative);
}
}
}
}
}
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (node_observers & node_observers, const std::string & name)
{
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (collect_seq_con_info (node_observers.blocks, "blocks"));
composite->add_component (collect_seq_con_info (node_observers.wallet, "wallet"));
composite->add_component (collect_seq_con_info (node_observers.vote, "vote"));
composite->add_component (collect_seq_con_info (node_observers.account_balance, "account_balance"));
composite->add_component (collect_seq_con_info (node_observers.endpoint, "endpoint"));
composite->add_component (collect_seq_con_info (node_observers.disconnect, "disconnect"));
return composite;
}
std::unique_ptr<seq_con_info_component> collect_seq_con_info (vote_processor & vote_processor, const std::string & name)
{
size_t votes_count = 0;
size_t representatives_1_count = 0;
size_t representatives_2_count = 0;
size_t representatives_3_count = 0;
{
		std::lock_guard<std::mutex> guard (vote_processor.mutex);
votes_count = vote_processor.votes.size ();
representatives_1_count = vote_processor.representatives_1.size ();
representatives_2_count = vote_processor.representatives_2.size ();
representatives_3_count = vote_processor.representatives_3.size ();
}
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "votes", votes_count, sizeof (decltype (vote_processor.votes)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "representatives_1", representatives_1_count, sizeof (decltype (vote_processor.representatives_1)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "representatives_2", representatives_2_count, sizeof (decltype (vote_processor.representatives_2)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "representatives_3", representatives_3_count, sizeof (decltype (vote_processor.representatives_3)::value_type) }));
return composite;
}
std::unique_ptr<seq_con_info_component> collect_seq_con_info (rep_crawler & rep_crawler, const std::string & name)
{
size_t count = 0;
{
std::lock_guard<std::mutex> guard (rep_crawler.active_mutex);
count = rep_crawler.active.size ();
}
auto sizeof_element = sizeof (decltype (rep_crawler.active)::value_type);
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "active", count, sizeof_element }));
return composite;
}
std::unique_ptr<seq_con_info_component> collect_seq_con_info (block_processor & block_processor, const std::string & name)
{
size_t state_blocks_count = 0;
size_t blocks_count = 0;
size_t blocks_hashes_count = 0;
size_t forced_count = 0;
size_t rolled_back_count = 0;
{
std::lock_guard<std::mutex> guard (block_processor.mutex);
state_blocks_count = block_processor.state_blocks.size ();
blocks_count = block_processor.blocks.size ();
blocks_hashes_count = block_processor.blocks_hashes.size ();
forced_count = block_processor.forced.size ();
rolled_back_count = block_processor.rolled_back.size ();
}
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "state_blocks", state_blocks_count, sizeof (decltype (block_processor.state_blocks)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks", blocks_count, sizeof (decltype (block_processor.blocks)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks_hashes", blocks_hashes_count, sizeof (decltype (block_processor.blocks_hashes)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "forced", forced_count, sizeof (decltype (block_processor.forced)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "rolled_back", rolled_back_count, sizeof (decltype (block_processor.rolled_back)::value_type) }));
composite->add_component (collect_seq_con_info (block_processor.generator, "generator"));
return composite;
}
}
nano::node::node (nano::node_init & init_a, boost::asio::io_context & io_ctx_a, uint16_t peering_port_a, boost::filesystem::path const & application_path_a, nano::alarm & alarm_a, nano::logging const & logging_a, nano::work_pool & work_a) :
node (init_a, io_ctx_a, application_path_a, alarm_a, nano::node_config (peering_port_a, logging_a), work_a)
{
}
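// Primary constructor: opens the block and wallet stores, wires the ledger, network, bootstrap and processing components together, and registers the standard observers below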
nano::node::node (nano::node_init & init_a, boost::asio::io_context & io_ctx_a, boost::filesystem::path const & application_path_a, nano::alarm & alarm_a, nano::node_config const & config_a, nano::work_pool & work_a, nano::node_flags flags_a) :
io_ctx (io_ctx_a),
config (config_a),
flags (flags_a),
alarm (alarm_a),
work (work_a),
logger (config_a.logging.min_time_between_log_output),
store_impl (std::make_unique<nano::mdb_store> (init_a.block_store_init, config.logging, application_path_a / "data.ldb", config_a.lmdb_max_dbs, !flags.disable_unchecked_drop, flags.sideband_batch_size)),
store (*store_impl),
wallets_store_impl (std::make_unique<nano::mdb_wallets_store> (init_a.wallets_store_init, application_path_a / "wallets.ldb", config_a.lmdb_max_dbs)),
wallets_store (*wallets_store_impl),
gap_cache (*this),
ledger (store, stats, config.epoch_block_link, config.epoch_block_signer),
active (*this),
network (*this, config.peering_port),
bootstrap_initiator (*this),
bootstrap (io_ctx_a, config.peering_port, *this),
application_path (application_path_a),
wallets (init_a.wallet_init, *this),
port_mapping (*this),
checker (config.signature_checker_threads),
vote_processor (*this),
rep_crawler (*this),
warmed_up (0),
block_processor (*this),
block_processor_thread ([this]() {
nano::thread_role::set (nano::thread_role::name::block_processing);
this->block_processor.process_blocks ();
}),
online_reps (*this, config.online_weight_minimum.number ()),
stats (config.stat_config),
vote_uniquer (block_uniquer),
startup_time (std::chrono::steady_clock::now ())
{
wallets.observer = [this](bool active) {
observers.wallet.notify (active);
};
network.channel_observer = [this](std::shared_ptr<nano::transport::channel> channel_a) {
observers.endpoint.notify (channel_a);
};
network.disconnect_observer = [this]() {
observers.disconnect.notify ();
};
if (!config.callback_address.empty ())
{
observers.blocks.add ([this](std::shared_ptr<nano::block> block_a, nano::account const & account_a, nano::amount const & amount_a, bool is_state_send_a) {
if (this->block_arrival.recent (block_a->hash ()))
{
auto node_l (shared_from_this ());
background ([node_l, block_a, account_a, amount_a, is_state_send_a]() {
boost::property_tree::ptree event;
event.add ("account", account_a.to_account ());
event.add ("hash", block_a->hash ().to_string ());
std::string block_text;
block_a->serialize_json (block_text);
event.add ("block", block_text);
event.add ("amount", amount_a.to_string_dec ());
if (is_state_send_a)
{
event.add ("is_send", is_state_send_a);
event.add ("subtype", "send");
}
// Subtype field
else if (block_a->type () == nano::block_type::state)
{
if (block_a->link ().is_zero ())
{
event.add ("subtype", "change");
}
else if (amount_a == 0 && !node_l->ledger.epoch_link.is_zero () && node_l->ledger.is_epoch_link (block_a->link ()))
{
event.add ("subtype", "epoch");
}
else
{
event.add ("subtype", "receive");
}
}
std::stringstream ostream;
boost::property_tree::write_json (ostream, event);
ostream.flush ();
auto body (std::make_shared<std::string> (ostream.str ()));
auto address (node_l->config.callback_address);
auto port (node_l->config.callback_port);
auto target (std::make_shared<std::string> (node_l->config.callback_target));
auto resolver (std::make_shared<boost::asio::ip::tcp::resolver> (node_l->io_ctx));
resolver->async_resolve (boost::asio::ip::tcp::resolver::query (address, std::to_string (port)), [node_l, address, port, target, body, resolver](boost::system::error_code const & ec, boost::asio::ip::tcp::resolver::iterator i_a) {
if (!ec)
{
node_l->do_rpc_callback (i_a, address, port, target, body, resolver);
}
else
{
if (node_l->config.logging.callback_logging ())
{
node_l->logger.always_log (boost::str (boost::format ("Error resolving callback: %1%:%2%: %3%") % address % port % ec.message ()));
}
node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
}
});
});
}
});
}
observers.endpoint.add ([this](std::shared_ptr<nano::transport::channel> channel_a) {
this->network.send_keepalive (*channel_a);
});
observers.vote.add ([this](nano::transaction const & transaction, std::shared_ptr<nano::vote> vote_a, std::shared_ptr<nano::transport::channel> channel_a) {
this->gap_cache.vote (vote_a);
this->online_reps.observe (vote_a->account);
nano::uint128_t rep_weight;
nano::uint128_t min_rep_weight;
{
rep_weight = ledger.weight (transaction, vote_a->account);
min_rep_weight = online_reps.online_stake () / 1000;
}
if (rep_weight > min_rep_weight)
{
bool rep_crawler_exists (false);
for (auto hash : *vote_a)
{
if (this->rep_crawler.exists (hash))
{
rep_crawler_exists = true;
break;
}
}
if (rep_crawler_exists)
{
// We saw a valid non-replay vote for a block we requested; this node is probably a representative
if (this->rep_crawler.response (channel_a, vote_a->account, rep_weight))
{
logger.try_log (boost::str (boost::format ("Found a representative at %1%") % channel_a->to_string ()));
// Rebroadcasting all active votes to new representative
auto blocks (this->active.list_blocks (true));
for (auto i (blocks.begin ()), n (blocks.end ()); i != n; ++i)
{
if (*i != nullptr)
{
nano::confirm_req req (*i);
channel_a->send (req);
}
}
}
}
}
});
if (NANO_VERSION_PATCH == 0)
{
logger.always_log ("Node starting, version: ", NANO_MAJOR_MINOR_VERSION);
}
else
{
logger.always_log ("Node starting, version: ", NANO_MAJOR_MINOR_RC_VERSION);
}
logger.always_log (boost::str (boost::format ("Work pool running %1% threads") % work.threads.size ()));
if (!init_a.error ())
{
if (config.logging.node_lifetime_tracing ())
{
logger.always_log ("Constructing node");
}
nano::genesis genesis;
auto transaction (store.tx_begin_write ());
if (store.latest_begin (transaction) == store.latest_end ())
{
// The store was empty, meaning we just created it; add the genesis block
store.initialize (transaction, genesis);
}
if (!store.block_exists (transaction, genesis.hash ()))
{
logger.always_log ("Genesis block not found. Make sure the node network ID is correct.");
std::exit (1);
}
node_id = nano::keypair (store.get_node_id (transaction));
logger.always_log ("Node ID: ", node_id.pub.to_account ());
}
const uint8_t * weight_buffer = network_params.is_live_network () ? nano_bootstrap_weights_live : nano_bootstrap_weights_beta;
size_t weight_size = network_params.is_live_network () ? nano_bootstrap_weights_live_size : nano_bootstrap_weights_beta_size;
if (network_params.is_live_network () || network_params.is_beta_network ())
{
nano::bufferstream weight_stream ((const uint8_t *)weight_buffer, weight_size);
nano::uint128_union block_height;
if (!nano::try_read (weight_stream, block_height))
{
auto max_blocks = (uint64_t)block_height.number ();
auto transaction (store.tx_begin_read ());
if (ledger.store.block_count (transaction).sum () < max_blocks)
{
ledger.bootstrap_weight_max_blocks = max_blocks;
while (true)
{
nano::account account;
if (nano::try_read (weight_stream, account.bytes))
{
break;
}
nano::amount weight;
if (nano::try_read (weight_stream, weight.bytes))
{
break;
}
logger.always_log ("Using bootstrap rep weight: ", account.to_account (), " -> ", weight.format_balance (Mxrb_ratio, 0, true), " XRB");
ledger.bootstrap_weights[account] = weight.number ();
}
}
}
}
}
nano::node::~node ()
{
if (config.logging.node_lifetime_tracing ())
{
logger.always_log ("Destructing node");
}
stop ();
}
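// POST the block-callback JSON to the configured HTTP endpoint, moving on to the next resolved address if a connection attempt fails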
void nano::node::do_rpc_callback (boost::asio::ip::tcp::resolver::iterator i_a, std::string const & address, uint16_t port, std::shared_ptr<std::string> target, std::shared_ptr<std::string> body, std::shared_ptr<boost::asio::ip::tcp::resolver> resolver)
{
if (i_a != boost::asio::ip::tcp::resolver::iterator{})
{
auto node_l (shared_from_this ());
auto sock (std::make_shared<boost::asio::ip::tcp::socket> (node_l->io_ctx));
sock->async_connect (i_a->endpoint (), [node_l, target, body, sock, address, port, i_a, resolver](boost::system::error_code const & ec) mutable {
if (!ec)
{
auto req (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ());
req->method (boost::beast::http::verb::post);
req->target (*target);
req->version (11);
req->insert (boost::beast::http::field::host, address);
req->insert (boost::beast::http::field::content_type, "application/json");
req->body () = *body;
req->prepare_payload ();
boost::beast::http::async_write (*sock, *req, [node_l, sock, address, port, req, i_a, target, body, resolver](boost::system::error_code const & ec, size_t bytes_transferred) mutable {
if (!ec)
{
auto sb (std::make_shared<boost::beast::flat_buffer> ());
auto resp (std::make_shared<boost::beast::http::response<boost::beast::http::string_body>> ());
boost::beast::http::async_read (*sock, *sb, *resp, [node_l, sb, resp, sock, address, port, i_a, target, body, resolver](boost::system::error_code const & ec, size_t bytes_transferred) mutable {
if (!ec)
{
if (resp->result () == boost::beast::http::status::ok)
{
node_l->stats.inc (nano::stat::type::http_callback, nano::stat::detail::initiate, nano::stat::dir::out);
}
else
{
if (node_l->config.logging.callback_logging ())
{
node_l->logger.try_log (boost::str (boost::format ("Callback to %1%:%2% failed with status: %3%") % address % port % resp->result ()));
}
node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
}
}
else
{
if (node_l->config.logging.callback_logging ())
{
node_l->logger.try_log (boost::str (boost::format ("Unable to complete callback: %1%:%2%: %3%") % address % port % ec.message ()));
}
node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
}
});
}
else
{
if (node_l->config.logging.callback_logging ())
{
node_l->logger.try_log (boost::str (boost::format ("Unable to send callback: %1%:%2%: %3%") % address % port % ec.message ()));
}
node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
}
});
}
else
{
if (node_l->config.logging.callback_logging ())
{
node_l->logger.try_log (boost::str (boost::format ("Unable to connect to callback address: %1%:%2%: %3%") % address % port % ec.message ()));
}
node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
++i_a;
node_l->do_rpc_callback (i_a, address, port, target, body, resolver);
}
});
}
}
bool nano::node::copy_with_compaction (boost::filesystem::path const & destination_file)
{
return !mdb_env_copy2 (boost::polymorphic_downcast<nano::mdb_store *> (store_impl.get ())->env.environment, destination_file.string ().c_str (), MDB_CP_COMPACT);
}
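// Handle a fork: if the incoming block's root is known but the block itself isn't, start an election between our ledger block and the fork and broadcast a confirm_req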
void nano::node::process_fork (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a)
{
auto root (block_a->root ());
if (!store.block_exists (transaction_a, block_a->type (), block_a->hash ()) && store.root_exists (transaction_a, block_a->root ()))
{
std::shared_ptr<nano::block> ledger_block (ledger.forked_block (transaction_a, *block_a));
if (ledger_block && !ledger.block_confirmed (transaction_a, ledger_block->hash ()))
{
std::weak_ptr<nano::node> this_w (shared_from_this ());
if (!active.start (ledger_block, [this_w, root](std::shared_ptr<nano::block>) {
if (auto this_l = this_w.lock ())
{
auto attempt (this_l->bootstrap_initiator.current_attempt ());
if (attempt && attempt->mode == nano::bootstrap_mode::legacy)
{
auto transaction (this_l->store.tx_begin_read ());
auto account (this_l->ledger.store.frontier_get (transaction, root));
if (!account.is_zero ())
{
attempt->requeue_pull (nano::pull_info (account, root, root));
}
else if (this_l->ledger.store.account_exists (transaction, root))
{
attempt->requeue_pull (nano::pull_info (root, nano::block_hash (0), nano::block_hash (0)));
}
}
}
}))
{
logger.always_log (boost::str (boost::format ("Resolving fork between our block: %1% and block %2% both with root %3%") % ledger_block->hash ().to_string () % block_a->hash ().to_string () % block_a->root ().to_string ()));
network.broadcast_confirm_req (ledger_block);
}
}
}
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (node & node, const std::string & name)
{
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (collect_seq_con_info (node.alarm, "alarm"));
composite->add_component (collect_seq_con_info (node.work, "work"));
composite->add_component (collect_seq_con_info (node.gap_cache, "gap_cache"));
composite->add_component (collect_seq_con_info (node.ledger, "ledger"));
composite->add_component (collect_seq_con_info (node.active, "active"));
composite->add_component (collect_seq_con_info (node.bootstrap_initiator, "bootstrap_initiator"));
composite->add_component (collect_seq_con_info (node.bootstrap, "bootstrap"));
composite->add_component (node.network.udp_channels.collect_seq_con_info ("udp_channels"));
composite->add_component (collect_seq_con_info (node.observers, "observers"));
composite->add_component (collect_seq_con_info (node.wallets, "wallets"));
composite->add_component (collect_seq_con_info (node.vote_processor, "vote_processor"));
composite->add_component (collect_seq_con_info (node.rep_crawler, "rep_crawler"));
composite->add_component (collect_seq_con_info (node.block_processor, "block_processor"));
composite->add_component (collect_seq_con_info (node.block_arrival, "block_arrival"));
composite->add_component (collect_seq_con_info (node.online_reps, "online_reps"));
composite->add_component (collect_seq_con_info (node.votes_cache, "votes_cache"));
composite->add_component (collect_seq_con_info (node.block_uniquer, "block_uniquer"));
composite->add_component (collect_seq_con_info (node.vote_uniquer, "vote_uniquer"));
return composite;
}
}
nano::gap_cache::gap_cache (nano::node & node_a) :
node (node_a)
{
}
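// Insert or refresh an entry for a block whose predecessor hasn't arrived yet, evicting the oldest entry when the cache is full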
void nano::gap_cache::add (nano::transaction const & transaction_a, nano::block_hash const & hash_a, std::chrono::steady_clock::time_point time_point_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto existing (blocks.get<1> ().find (hash_a));
if (existing != blocks.get<1> ().end ())
{
blocks.get<1> ().modify (existing, [time_point_a](nano::gap_information & info) {
info.arrival = time_point_a;
});
}
else
{
blocks.insert ({ time_point_a, hash_a, std::unordered_set<nano::account> () });
if (blocks.size () > max)
{
blocks.get<0> ().erase (blocks.get<0> ().begin ());
}
}
}
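// Accumulate voting weight on gap entries; once enough weight has voted for a missing block, schedule a lazy (or legacy) bootstrap for it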
void nano::gap_cache::vote (std::shared_ptr<nano::vote> vote_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto transaction (node.store.tx_begin_read ());
for (auto hash : *vote_a)
{
auto existing (blocks.get<1> ().find (hash));
if (existing != blocks.get<1> ().end ())
{
auto is_new (false);
blocks.get<1> ().modify (existing, [&](nano::gap_information & info) { is_new = info.voters.insert (vote_a->account).second; });
if (is_new)
{
uint128_t tally;
for (auto & voter : existing->voters)
{
tally += node.ledger.weight (transaction, voter);
}
bool start_bootstrap (false);
if (!node.flags.disable_lazy_bootstrap)
{
if (tally >= node.config.online_weight_minimum.number ())
{
start_bootstrap = true;
}
}
else if (!node.flags.disable_legacy_bootstrap && tally > bootstrap_threshold (transaction))
{
start_bootstrap = true;
}
if (start_bootstrap)
{
auto node_l (node.shared ());
auto now (std::chrono::steady_clock::now ());
node.alarm.add (node_l->network_params.is_test_network () ? now + std::chrono::milliseconds (5) : now + std::chrono::seconds (5), [node_l, hash]() {
auto transaction (node_l->store.tx_begin_read ());
if (!node_l->store.block_exists (transaction, hash))
{
if (!node_l->bootstrap_initiator.in_progress ())
{
node_l->logger.try_log (boost::str (boost::format ("Missing block %1% which has enough votes to warrant lazy bootstrapping it") % hash.to_string ()));
}
if (!node_l->flags.disable_lazy_bootstrap)
{
node_l->bootstrap_initiator.bootstrap_lazy (hash);
}
else if (!node_l->flags.disable_legacy_bootstrap)
{
node_l->bootstrap_initiator.bootstrap ();
}
}
});
}
}
}
}
}
nano::uint128_t nano::gap_cache::bootstrap_threshold (nano::transaction const & transaction_a)
{
auto result ((node.online_reps.online_stake () / 256) * node.config.bootstrap_fraction_numerator);
return result;
}
size_t nano::gap_cache::size ()
{
std::lock_guard<std::mutex> lock (mutex);
return blocks.size ();
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (gap_cache & gap_cache, const std::string & name)
{
auto count = gap_cache.size ();
auto sizeof_element = sizeof (decltype (gap_cache.blocks)::value_type);
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks", count, sizeof_element }));
return composite;
}
}
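// Entry point for blocks received from the live network: record arrival time and queue the block for processing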
void nano::node::process_active (std::shared_ptr<nano::block> incoming)
{
block_arrival.add (incoming->hash ());
block_processor.add (incoming, nano::seconds_since_epoch ());
}
nano::process_return nano::node::process (nano::block const & block_a)
{
auto transaction (store.tx_begin_write ());
auto result (ledger.process (transaction, block_a));
return result;
}
void nano::node::start ()
{
network.start ();
add_initial_peers ();
if (!flags.disable_legacy_bootstrap)
{
ongoing_bootstrap ();
}
else if (!flags.disable_unchecked_cleanup)
{
ongoing_unchecked_cleanup ();
}
ongoing_store_flush ();
rep_crawler.start ();
ongoing_rep_calculation ();
ongoing_peer_store ();
ongoing_online_weight_calculation_queue ();
if (!flags.disable_bootstrap_listener)
{
bootstrap.start ();
}
if (!flags.disable_backup)
{
backup_wallet ();
}
search_pending ();
if (!flags.disable_wallet_bootstrap)
{
// Delay the start of wallet lazy bootstrap
auto this_l (shared ());
alarm.add (std::chrono::steady_clock::now () + std::chrono::minutes (1), [this_l]() {
this_l->bootstrap_wallet ();
});
}
port_mapping.start ();
}
void nano::node::stop ()
{
logger.always_log ("Node stopping");
block_processor.stop ();
if (block_processor_thread.joinable ())
{
block_processor_thread.join ();
}
vote_processor.stop ();
active.stop ();
network.stop ();
bootstrap_initiator.stop ();
bootstrap.stop ();
port_mapping.stop ();
checker.stop ();
wallets.stop ();
}
void nano::node::keepalive_preconfigured (std::vector<std::string> const & peers_a)
{
for (auto i (peers_a.begin ()), n (peers_a.end ()); i != n; ++i)
{
keepalive (*i, network_params.default_node_port, true);
}
}
nano::block_hash nano::node::latest (nano::account const & account_a)
{
auto transaction (store.tx_begin_read ());
return ledger.latest (transaction, account_a);
}
nano::uint128_t nano::node::balance (nano::account const & account_a)
{
auto transaction (store.tx_begin_read ());
return ledger.account_balance (transaction, account_a);
}
std::shared_ptr<nano::block> nano::node::block (nano::block_hash const & hash_a)
{
auto transaction (store.tx_begin_read ());
return store.block_get (transaction, hash_a);
}
std::pair<nano::uint128_t, nano::uint128_t> nano::node::balance_pending (nano::account const & account_a)
{
std::pair<nano::uint128_t, nano::uint128_t> result;
auto transaction (store.tx_begin_read ());
result.first = ledger.account_balance (transaction, account_a);
result.second = ledger.account_pending (transaction, account_a);
return result;
}
nano::uint128_t nano::node::weight (nano::account const & account_a)
{
auto transaction (store.tx_begin_read ());
return ledger.weight (transaction, account_a);
}
nano::account nano::node::representative (nano::account const & account_a)
{
auto transaction (store.tx_begin_read ());
nano::account_info info;
nano::account result (0);
if (!store.account_get (transaction, account_a, info))
{
result = info.rep_block;
}
return result;
}
void nano::node::ongoing_rep_calculation ()
{
auto now (std::chrono::steady_clock::now ());
vote_processor.calculate_weights ();
std::weak_ptr<nano::node> node_w (shared_from_this ());
alarm.add (now + std::chrono::minutes (10), [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_rep_calculation ();
}
});
}
void nano::node::ongoing_bootstrap ()
{
auto next_wakeup (300);
if (warmed_up < 3)
{
// Re-attempt bootstrapping more aggressively on startup
next_wakeup = 5;
if (!bootstrap_initiator.in_progress () && !network.empty ())
{
++warmed_up;
}
}
bootstrap_initiator.bootstrap ();
std::weak_ptr<nano::node> node_w (shared_from_this ());
alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (next_wakeup), [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_bootstrap ();
}
});
}
void nano::node::ongoing_store_flush ()
{
{
auto transaction (store.tx_begin_write ());
store.flush (transaction);
}
std::weak_ptr<nano::node> node_w (shared_from_this ());
alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (5), [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_store_flush ();
}
});
}
void nano::node::ongoing_peer_store ()
{
network.udp_channels.store_all (*this);
std::weak_ptr<nano::node> node_w (shared_from_this ());
alarm.add (std::chrono::steady_clock::now () + network_params.node.peer_interval, [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_peer_store ();
}
});
}
void nano::node::backup_wallet ()
{
auto transaction (wallets.tx_begin_read ());
for (auto i (wallets.items.begin ()), n (wallets.items.end ()); i != n; ++i)
{
boost::system::error_code error_chmod;
auto backup_path (application_path / "backup");
boost::filesystem::create_directories (backup_path);
nano::set_secure_perm_directory (backup_path, error_chmod);
i->second->store.write_backup (transaction, backup_path / (i->first.to_string () + ".json"));
}
auto this_l (shared ());
alarm.add (std::chrono::steady_clock::now () + network_params.node.backup_interval, [this_l]() {
this_l->backup_wallet ();
});
}
void nano::node::search_pending ()
{
// Reload wallets from disk
wallets.reload ();
// Search pending
wallets.search_pending_all ();
auto this_l (shared ());
alarm.add (std::chrono::steady_clock::now () + network_params.node.search_pending_interval, [this_l]() {
this_l->search_pending ();
});
}
void nano::node::bootstrap_wallet ()
{
std::deque<nano::account> accounts;
{
std::lock_guard<std::mutex> lock (wallets.mutex);
auto transaction (wallets.tx_begin_read ());
for (auto i (wallets.items.begin ()), n (wallets.items.end ()); i != n && accounts.size () < 128; ++i)
{
auto & wallet (*i->second);
std::lock_guard<std::recursive_mutex> wallet_lock (wallet.store.mutex);
for (auto j (wallet.store.begin (transaction)), m (wallet.store.end ()); j != m && accounts.size () < 128; ++j)
{
nano::account account (j->first);
accounts.push_back (account);
}
}
}
bootstrap_initiator.bootstrap_wallet (accounts);
}
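// Purge unchecked blocks whose last modification is older than the configured cutoff, deleting them in bounded batches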
void nano::node::unchecked_cleanup ()
{
std::deque<nano::unchecked_key> cleaning_list;
// Collect old unchecked keys
{
auto now (nano::seconds_since_epoch ());
auto transaction (store.tx_begin_read ());
// Clean at most 128k records and spend at most 2 minutes reading, to avoid startup issues on slow I/O systems
for (auto i (store.unchecked_begin (transaction)), n (store.unchecked_end ()); i != n && cleaning_list.size () < 128 * 1024 && nano::seconds_since_epoch () - now < 120; ++i)
{
nano::unchecked_key key (i->first);
nano::unchecked_info info (i->second);
if ((now - info.modified) > config.unchecked_cutoff_time.count ())
{
cleaning_list.push_back (key);
}
}
}
// Delete old unchecked keys in batches
while (!cleaning_list.empty ())
{
size_t deleted_count (0);
auto transaction (store.tx_begin_write ());
while (deleted_count++ < 2 * 1024 && !cleaning_list.empty ())
{
auto key (cleaning_list.front ());
cleaning_list.pop_front ();
store.unchecked_del (transaction, key);
}
}
}
void nano::node::ongoing_unchecked_cleanup ()
{
if (!bootstrap_initiator.in_progress ())
{
unchecked_cleanup ();
}
auto this_l (shared ());
alarm.add (std::chrono::steady_clock::now () + network_params.node.unchecked_cleaning_interval, [this_l]() {
this_l->ongoing_unchecked_cleanup ();
});
}
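// Legacy pricing helper: the per-unit price falls linearly as the remaining balance approaches free_cutoff and is clamped to [0, price_max]; the result is returned scaled by 100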
int nano::node::price (nano::uint128_t const & balance_a, int amount_a)
{
assert (balance_a >= amount_a * nano::Gxrb_ratio);
auto balance_l (balance_a);
double result (0.0);
for (auto i (0); i < amount_a; ++i)
{
balance_l -= nano::Gxrb_ratio;
auto balance_scaled ((balance_l / nano::Mxrb_ratio).convert_to<double> ());
auto units (balance_scaled / 1000.0);
auto unit_price (((free_cutoff - units) / free_cutoff) * price_max);
result += std::min (std::max (0.0, unit_price), price_max);
}
return static_cast<int> (result * 100.0);
}
namespace
{
class work_request
{
public:
work_request (boost::asio::io_context & io_ctx_a, boost::asio::ip::address address_a, uint16_t port_a) :
address (address_a),
port (port_a),
socket (io_ctx_a)
{
}
boost::asio::ip::address address;
uint16_t port;
boost::beast::flat_buffer buffer;
boost::beast::http::response<boost::beast::http::string_body> response;
boost::asio::ip::tcp::socket socket;
};
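// Fans a work_generate request out to the configured work peers over HTTP; on total failure it falls back to local generation or retries with exponential backoff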
class distributed_work : public std::enable_shared_from_this<distributed_work>
{
public:
distributed_work (std::shared_ptr<nano::node> const & node_a, nano::block_hash const & root_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a) :
distributed_work (1, node_a, root_a, callback_a, difficulty_a)
{
assert (node_a != nullptr);
}
distributed_work (unsigned int backoff_a, std::shared_ptr<nano::node> const & node_a, nano::block_hash const & root_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a) :
callback (callback_a),
backoff (backoff_a),
node (node_a),
root (root_a),
need_resolve (node_a->config.work_peers),
difficulty (difficulty_a)
{
assert (node_a != nullptr);
completed.clear ();
}
void start ()
{
if (need_resolve.empty ())
{
start_work ();
}
else
{
auto current (need_resolve.back ());
need_resolve.pop_back ();
auto this_l (shared_from_this ());
boost::system::error_code ec;
auto parsed_address (boost::asio::ip::address_v6::from_string (current.first, ec));
if (!ec)
{
outstanding[parsed_address] = current.second;
start ();
}
else
{
node->network.resolver.async_resolve (boost::asio::ip::udp::resolver::query (current.first, std::to_string (current.second)), [current, this_l](boost::system::error_code const & ec, boost::asio::ip::udp::resolver::iterator i_a) {
if (!ec)
{
for (auto i (i_a), n (boost::asio::ip::udp::resolver::iterator{}); i != n; ++i)
{
auto endpoint (i->endpoint ());
this_l->outstanding[endpoint.address ()] = endpoint.port ();
}
}
else
{
this_l->node->logger.try_log (boost::str (boost::format ("Error resolving work peer: %1%:%2%: %3%") % current.first % current.second % ec.message ()));
}
this_l->start ();
});
}
}
}
void start_work ()
{
if (!outstanding.empty ())
{
auto this_l (shared_from_this ());
std::lock_guard<std::mutex> lock (mutex);
for (auto const & i : outstanding)
{
auto host (i.first);
auto service (i.second);
node->background ([this_l, host, service]() {
auto connection (std::make_shared<work_request> (this_l->node->io_ctx, host, service));
connection->socket.async_connect (nano::tcp_endpoint (host, service), [this_l, connection](boost::system::error_code const & ec) {
if (!ec)
{
std::string request_string;
{
boost::property_tree::ptree request;
request.put ("action", "work_generate");
request.put ("hash", this_l->root.to_string ());
request.put ("difficulty", nano::to_string_hex (this_l->difficulty));
std::stringstream ostream;
boost::property_tree::write_json (ostream, request);
request_string = ostream.str ();
}
auto request (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ());
request->method (boost::beast::http::verb::post);
request->target ("/");
request->version (11);
request->body () = request_string;
request->prepare_payload ();
boost::beast::http::async_write (connection->socket, *request, [this_l, connection, request](boost::system::error_code const & ec, size_t bytes_transferred) {
if (!ec)
{
boost::beast::http::async_read (connection->socket, connection->buffer, connection->response, [this_l, connection](boost::system::error_code const & ec, size_t bytes_transferred) {
if (!ec)
{
if (connection->response.result () == boost::beast::http::status::ok)
{
this_l->success (connection->response.body (), connection->address);
}
else
{
this_l->node->logger.try_log (boost::str (boost::format ("Work peer responded with an error %1% %2%: %3%") % connection->address % connection->port % connection->response.result ()));
this_l->failure (connection->address);
}
}
else
{
this_l->node->logger.try_log (boost::str (boost::format ("Unable to read from work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ()));
this_l->failure (connection->address);
}
});
}
else
{
this_l->node->logger.try_log (boost::str (boost::format ("Unable to write to work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ()));
this_l->failure (connection->address);
}
});
}
else
{
this_l->node->logger.try_log (boost::str (boost::format ("Unable to connect to work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ()));
this_l->failure (connection->address);
}
});
});
}
}
else
{
handle_failure (true);
}
}
void stop ()
{
auto this_l (shared_from_this ());
std::lock_guard<std::mutex> lock (mutex);
for (auto const & i : outstanding)
{
auto host (i.first);
node->background ([this_l, host]() {
std::string request_string;
{
boost::property_tree::ptree request;
request.put ("action", "work_cancel");
request.put ("hash", this_l->root.to_string ());
std::stringstream ostream;
boost::property_tree::write_json (ostream, request);
request_string = ostream.str ();
}
boost::beast::http::request<boost::beast::http::string_body> request;
request.method (boost::beast::http::verb::post);
request.target ("/");
request.version (11);
request.body () = request_string;
request.prepare_payload ();
auto socket (std::make_shared<boost::asio::ip::tcp::socket> (this_l->node->io_ctx));
boost::beast::http::async_write (*socket, request, [socket](boost::system::error_code const & ec, size_t bytes_transferred) {
});
});
}
outstanding.clear ();
}
void success (std::string const & body_a, boost::asio::ip::address const & address)
{
auto last (remove (address));
std::stringstream istream (body_a);
try
{
boost::property_tree::ptree result;
boost::property_tree::read_json (istream, result);
auto work_text (result.get<std::string> ("work"));
uint64_t work;
if (!nano::from_string_hex (work_text, work))
{
uint64_t result_difficulty (0);
if (!nano::work_validate (root, work, &result_difficulty) && result_difficulty >= difficulty)
{
set_once (work);
stop ();
}
else
{
node->logger.try_log (boost::str (boost::format ("Incorrect work response from %1% for root %2% with difficulty %3%: %4%") % address % root.to_string () % nano::to_string_hex (difficulty) % work_text));
handle_failure (last);
}
}
else
{
node->logger.try_log (boost::str (boost::format ("Work response from %1% wasn't a number: %2%") % address % work_text));
handle_failure (last);
}
}
catch (...)
{
node->logger.try_log (boost::str (boost::format ("Work response from %1% wasn't parsable: %2%") % address % body_a));
handle_failure (last);
}
}
void set_once (uint64_t work_a)
{
if (!completed.test_and_set ())
{
callback (work_a);
}
}
void failure (boost::asio::ip::address const & address)
{
auto last (remove (address));
handle_failure (last);
}
void handle_failure (bool last)
{
if (last)
{
if (!completed.test_and_set ())
{
if (node->config.work_threads != 0 || node->work.opencl)
{
auto callback_l (callback);
// clang-format off
node->work.generate (root, [callback_l](boost::optional<uint64_t> const & work_a) {
callback_l (work_a.value ());
},
difficulty);
// clang-format on
}
else
{
if (backoff == 1 && node->config.logging.work_generation_time ())
{
node->logger.try_log ("Work peer(s) failed to generate work for root ", root.to_string (), ", retrying...");
}
auto now (std::chrono::steady_clock::now ());
auto root_l (root);
auto callback_l (callback);
std::weak_ptr<nano::node> node_w (node);
auto next_backoff (std::min (backoff * 2, (unsigned int)60 * 5));
// clang-format off
node->alarm.add (now + std::chrono::seconds (backoff), [ node_w, root_l, callback_l, next_backoff, difficulty = difficulty ] {
if (auto node_l = node_w.lock ())
{
auto work_generation (std::make_shared<distributed_work> (next_backoff, node_l, root_l, callback_l, difficulty));
work_generation->start ();
}
});
// clang-format on
}
}
}
}
bool remove (boost::asio::ip::address const & address)
{
std::lock_guard<std::mutex> lock (mutex);
outstanding.erase (address);
return outstanding.empty ();
}
std::function<void(uint64_t)> callback;
unsigned int backoff; // in seconds
std::shared_ptr<nano::node> node;
nano::block_hash root;
std::mutex mutex;
std::map<boost::asio::ip::address, uint16_t> outstanding;
std::vector<std::pair<std::string, uint16_t>> need_resolve;
std::atomic_flag completed;
uint64_t difficulty;
};
}
void nano::node::work_generate_blocking (nano::block & block_a)
{
work_generate_blocking (block_a, network_params.publish_threshold);
}
void nano::node::work_generate_blocking (nano::block & block_a, uint64_t difficulty_a)
{
block_a.block_work_set (work_generate_blocking (block_a.root (), difficulty_a));
}
void nano::node::work_generate (nano::uint256_union const & hash_a, std::function<void(uint64_t)> callback_a)
{
work_generate (hash_a, callback_a, network_params.publish_threshold);
}
void nano::node::work_generate (nano::uint256_union const & hash_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a)
{
auto work_generation (std::make_shared<distributed_work> (shared (), hash_a, callback_a, difficulty_a));
work_generation->start ();
}
uint64_t nano::node::work_generate_blocking (nano::uint256_union const & block_a)
{
return work_generate_blocking (block_a, network_params.publish_threshold);
}
uint64_t nano::node::work_generate_blocking (nano::uint256_union const & hash_a, uint64_t difficulty_a)
{
std::promise<uint64_t> promise;
std::future<uint64_t> future = promise.get_future ();
// clang-format off
work_generate (hash_a, [&promise](uint64_t work_a) {
promise.set_value (work_a);
},
difficulty_a);
// clang-format on
return future.get ();
}
void nano::node::add_initial_peers ()
{
auto transaction (store.tx_begin_read ());
for (auto i (store.peers_begin (transaction)), n (store.peers_end ()); i != n; ++i)
{
nano::endpoint endpoint (boost::asio::ip::address_v6 (i->first.address_bytes ()), i->first.port ());
if (!network.udp_channels.reachout (endpoint, config.allow_local_peers))
{
auto channel (std::make_shared<nano::transport::channel_udp> (network.udp_channels, endpoint));
network.send_keepalive (*channel);
rep_crawler.query (channel);
}
}
}
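// Start an election for the block, broadcast a confirm_req, and queue a local vote when voting is enabled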
void nano::node::block_confirm (std::shared_ptr<nano::block> block_a)
{
active.start (block_a);
network.broadcast_confirm_req (block_a);
// Calculate votes for local representatives
if (config.enable_voting && active.active (*block_a))
{
block_processor.generator.add (block_a->hash ());
}
}
nano::uint128_t nano::node::delta ()
{
auto result ((online_reps.online_stake () / 100) * config.online_weight_quorum);
return result;
}
void nano::node::ongoing_online_weight_calculation_queue ()
{
std::weak_ptr<nano::node> node_w (shared_from_this ());
alarm.add (std::chrono::steady_clock::now () + (std::chrono::seconds (network_params.node.weight_period)), [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_online_weight_calculation ();
}
});
}
void nano::node::ongoing_online_weight_calculation ()
{
online_reps.sample ();
ongoing_online_weight_calculation_queue ();
}
namespace
{
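// Visits a confirmed block and, for sends, queues a receive on any local wallet that holds the destination account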
class confirmed_visitor : public nano::block_visitor
{
public:
confirmed_visitor (nano::transaction const & transaction_a, nano::node & node_a, std::shared_ptr<nano::block> block_a, nano::block_hash const & hash_a) :
transaction (transaction_a),
node (node_a),
block (block_a),
hash (hash_a)
{
}
virtual ~confirmed_visitor () = default;
void scan_receivable (nano::account const & account_a)
{
for (auto i (node.wallets.items.begin ()), n (node.wallets.items.end ()); i != n; ++i)
{
auto wallet (i->second);
auto transaction_l (node.wallets.tx_begin_read ());
if (wallet->store.exists (transaction_l, account_a))
{
nano::account representative;
nano::pending_info pending;
representative = wallet->store.representative (transaction_l);
auto error (node.store.pending_get (transaction, nano::pending_key (account_a, hash), pending));
if (!error)
{
auto node_l (node.shared ());
auto amount (pending.amount.number ());
wallet->receive_async (block, representative, amount, [](std::shared_ptr<nano::block>) {});
}
else
{
if (!node.store.block_exists (transaction, hash))
{
node.logger.try_log (boost::str (boost::format ("Confirmed block is missing: %1%") % hash.to_string ()));
assert (false && "Confirmed block is missing");
}
else
{
node.logger.try_log (boost::str (boost::format ("Block %1% has already been received") % hash.to_string ()));
}
}
}
}
}
void state_block (nano::state_block const & block_a) override
{
scan_receivable (block_a.hashables.link);
}
void send_block (nano::send_block const & block_a) override
{
scan_receivable (block_a.hashables.destination);
}
void receive_block (nano::receive_block const &) override
{
}
void open_block (nano::open_block const &) override
{
}
void change_block (nano::change_block const &) override
{
}
nano::transaction const & transaction;
nano::node & node;
std::shared_ptr<nano::block> block;
nano::block_hash const & hash;
};
}
void nano::node::receive_confirmed (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a, nano::block_hash const & hash_a)
{
confirmed_visitor visitor (transaction_a, *this, block_a, hash_a);
block_a->visit (visitor);
}
void nano::node::process_confirmed (std::shared_ptr<nano::block> block_a, uint8_t iteration)
{
auto hash (block_a->hash ());
if (ledger.block_exists (block_a->type (), hash))
{
add_confirmation_heights (hash);
auto transaction (store.tx_begin_read ());
receive_confirmed (transaction, block_a, hash);
auto account (ledger.account (transaction, hash));
auto amount (ledger.amount (transaction, hash));
bool is_state_send (false);
nano::account pending_account (0);
if (auto state = dynamic_cast<nano::state_block *> (block_a.get ()))
{
is_state_send = ledger.is_send (transaction, *state);
pending_account = state->hashables.link;
}
if (auto send = dynamic_cast<nano::send_block *> (block_a.get ()))
{
pending_account = send->hashables.destination;
}
observers.blocks.notify (block_a, account, amount, is_state_send);
if (amount > 0)
{
observers.account_balance.notify (account, false);
if (!pending_account.is_zero ())
{
observers.account_balance.notify (pending_account, true);
}
}
}
// Limit to 0.5 * 20 = 10 seconds (more than max block_processor::process_batch finish time)
else if (iteration < 20)
{
iteration++;
std::weak_ptr<nano::node> node_w (shared ());
alarm.add (std::chrono::steady_clock::now () + network_params.node.process_confirmed_interval, [node_w, block_a, iteration]() {
if (auto node_l = node_w.lock ())
{
node_l->process_confirmed (block_a, iteration);
}
});
}
}
void nano::node::process_message (nano::message const & message_a, std::shared_ptr<nano::transport::channel> channel_a)
{
network_message_visitor visitor (*this, channel_a);
message_a.visit (visitor);
}
nano::endpoint nano::network::endpoint ()
{
return udp_channels.local_endpoint ();
}
void nano::network::cleanup (std::chrono::steady_clock::time_point const & cutoff_a)
{
node.network.udp_channels.purge (cutoff_a);
if (node.network.empty ())
{
disconnect_observer ();
}
}
void nano::network::ongoing_cleanup ()
{
cleanup (std::chrono::steady_clock::now () - node.network_params.node.cutoff);
std::weak_ptr<nano::node> node_w (node.shared ());
node.alarm.add (std::chrono::steady_clock::now () + node.network_params.node.period, [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->network.ongoing_cleanup ();
}
});
}
size_t nano::network::size () const
{
return udp_channels.size ();
}
size_t nano::network::size_sqrt () const
{
return (static_cast<size_t> (std::ceil (std::sqrt (size ()))));
}
bool nano::network::empty () const
{
return size () == 0;
}
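// Record a recently arrived block hash; returns true if the hash was already present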
bool nano::block_arrival::add (nano::block_hash const & hash_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto now (std::chrono::steady_clock::now ());
auto inserted (arrival.insert (nano::block_arrival_info{ now, hash_a }));
auto result (!inserted.second);
return result;
}
bool nano::block_arrival::recent (nano::block_hash const & hash_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto now (std::chrono::steady_clock::now ());
while (arrival.size () > arrival_size_min && arrival.begin ()->arrival + arrival_time_min < now)
{
arrival.erase (arrival.begin ());
}
return arrival.get<1> ().find (hash_a) != arrival.get<1> ().end ();
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (block_arrival & block_arrival, const std::string & name)
{
size_t count = 0;
{
std::lock_guard<std::mutex> guard (block_arrival.mutex);
count = block_arrival.arrival.size ();
}
auto sizeof_element = sizeof (decltype (block_arrival.arrival)::value_type);
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "arrival", count, sizeof_element }));
return composite;
}
}
nano::online_reps::online_reps (nano::node & node_a, nano::uint128_t minimum_a) :
node (node_a),
minimum (minimum_a)
{
auto transaction (node.ledger.store.tx_begin_read ());
online = trend (transaction);
}
void nano::online_reps::observe (nano::account const & rep_a)
{
auto transaction (node.ledger.store.tx_begin_read ());
if (node.ledger.weight (transaction, rep_a) > 0)
{
std::lock_guard<std::mutex> lock (mutex);
reps.insert (rep_a);
}
}
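// Persist a new online-weight sample, discarding the oldest samples beyond max_weight_samples, and refresh the trended value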
void nano::online_reps::sample ()
{
auto transaction (node.ledger.store.tx_begin_write ());
// Discard oldest entries
while (node.ledger.store.online_weight_count (transaction) >= node.network_params.node.max_weight_samples)
{
auto oldest (node.ledger.store.online_weight_begin (transaction));
assert (oldest != node.ledger.store.online_weight_end ());
node.ledger.store.online_weight_del (transaction, oldest->first);
}
// Calculate current active rep weight
nano::uint128_t current;
std::unordered_set<nano::account> reps_copy;
{
std::lock_guard<std::mutex> lock (mutex);
reps_copy.swap (reps);
}
for (auto & i : reps_copy)
{
current += node.ledger.weight (transaction, i);
}
node.ledger.store.online_weight_put (transaction, std::chrono::system_clock::now ().time_since_epoch ().count (), current);
auto trend_l (trend (transaction));
std::lock_guard<std::mutex> lock (mutex);
online = trend_l;
}
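// Trended online weight: the median of the stored samples, seeded with the configured minimum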
nano::uint128_t nano::online_reps::trend (nano::transaction & transaction_a)
{
std::vector<nano::uint128_t> items;
items.reserve (node.network_params.node.max_weight_samples + 1);
items.push_back (minimum);
for (auto i (node.ledger.store.online_weight_begin (transaction_a)), n (node.ledger.store.online_weight_end ()); i != n; ++i)
{
items.push_back (i->second.number ());
}
// Pick median value for our target vote weight
auto median_idx = items.size () / 2;
nth_element (items.begin (), items.begin () + median_idx, items.end ());
return nano::uint128_t{ items[median_idx] };
}
nano::uint128_t nano::online_reps::online_stake ()
{
std::lock_guard<std::mutex> lock (mutex);
return std::max (online, minimum);
}
std::vector<nano::account> nano::online_reps::list ()
{
std::vector<nano::account> result;
std::lock_guard<std::mutex> lock (mutex);
for (auto & i : reps)
{
result.push_back (i);
}
return result;
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (online_reps & online_reps, const std::string & name)
{
size_t count = 0;
{
std::lock_guard<std::mutex> guard (online_reps.mutex);
count = online_reps.reps.size ();
}
auto sizeof_element = sizeof (decltype (online_reps.reps)::value_type);
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "reps", count, sizeof_element }));
return composite;
}
}
std::shared_ptr<nano::node> nano::node::shared ()
{
return shared_from_this ();
}
nano::election_vote_result::election_vote_result () :
replay (false),
processed (false)
{
}
nano::election_vote_result::election_vote_result (bool replay_a, bool processed_a)
{
replay = replay_a;
processed = processed_a;
}
nano::election::election (nano::node & node_a, std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) :
confirmation_action (confirmation_action_a),
node (node_a),
election_start (std::chrono::steady_clock::now ()),
status ({ block_a, 0 }),
confirmed (false),
stopped (false),
announcements (0)
{
last_votes.insert (std::make_pair (node.network_params.ledger.not_an_account (), nano::vote_info{ std::chrono::steady_clock::now (), 0, block_a->hash () }));
blocks.insert (std::make_pair (block_a->hash (), block_a));
update_dependent ();
}
void nano::election::compute_rep_votes (nano::transaction const & transaction_a)
{
if (node.config.enable_voting)
{
node.wallets.foreach_representative (transaction_a, [this, &transaction_a](nano::public_key const & pub_a, nano::raw_key const & prv_a) {
auto vote (this->node.store.vote_generate (transaction_a, pub_a, prv_a, status.winner));
this->node.vote_processor.vote (vote, std::make_shared<nano::transport::channel_udp> (this->node.network.udp_channels, this->node.network.endpoint ()));
});
}
}
void nano::election::confirm_once ()
{
if (!confirmed.exchange (true))
{
status.election_end = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ());
status.election_duration = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now () - election_start);
auto winner_l (status.winner);
auto node_l (node.shared ());
auto confirmation_action_l (confirmation_action);
node.background ([node_l, winner_l, confirmation_action_l]() {
node_l->process_confirmed (winner_l);
confirmation_action_l (winner_l);
});
}
}
void nano::election::stop ()
{
stopped = true;
}
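// Quorum requires total tallied weight to reach the online weight minimum and the leading block to exceed the runner-up by more than delta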
bool nano::election::have_quorum (nano::tally_t const & tally_a, nano::uint128_t tally_sum)
{
bool result = false;
if (tally_sum >= node.config.online_weight_minimum.number ())
{
auto i (tally_a.begin ());
auto first (i->first);
++i;
auto second (i != tally_a.end () ? i->first : 0);
auto delta_l (node.delta ());
result = tally_a.begin ()->first > (second + delta_l);
}
return result;
}
nano::tally_t nano::election::tally (nano::transaction const & transaction_a)
{
std::unordered_map<nano::block_hash, nano::uint128_t> block_weights;
for (auto vote_info : last_votes)
{
block_weights[vote_info.second.hash] += node.ledger.weight (transaction_a, vote_info.first);
}
last_tally = block_weights;
nano::tally_t result;
for (auto item : block_weights)
{
auto block (blocks.find (item.first));
if (block != blocks.end ())
{
result.insert (std::make_pair (item.second, block->second));
}
}
return result;
}
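// Re-tally the votes, switch the winner if a different block now leads with sufficient weight, and confirm the election once quorum is reached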
void nano::election::confirm_if_quorum (nano::transaction const & transaction_a)
{
auto tally_l (tally (transaction_a));
assert (!tally_l.empty ());
auto winner (tally_l.begin ());
auto block_l (winner->second);
status.tally = winner->first;
nano::uint128_t sum (0);
for (auto & i : tally_l)
{
sum += i.first;
}
if (sum >= node.config.online_weight_minimum.number () && block_l->hash () != status.winner->hash ())
{
auto node_l (node.shared ());
node_l->block_processor.force (block_l);
status.winner = block_l;
update_dependent ();
node_l->active.adjust_difficulty (block_l->hash ());
}
if (have_quorum (tally_l, sum))
{
if (node.config.logging.vote_logging () || blocks.size () > 1)
{
log_votes (tally_l);
}
confirm_once ();
}
}
void nano::election::log_votes (nano::tally_t const & tally_a)
{
std::stringstream tally;
tally << boost::str (boost::format ("\nVote tally for root %1%") % status.winner->root ().to_string ());
for (auto i (tally_a.begin ()), n (tally_a.end ()); i != n; ++i)
{
tally << boost::str (boost::format ("\nBlock %1% weight %2%") % i->second->hash ().to_string () % i->first.convert_to<std::string> ());
}
for (auto i (last_votes.begin ()), n (last_votes.end ()); i != n; ++i)
{
tally << boost::str (boost::format ("\n%1% %2%") % i->first.to_account () % i->second.hash.to_string ());
}
node.logger.try_log (tally.str ());
}
nano::election_vote_result nano::election::vote (nano::account rep, uint64_t sequence, nano::block_hash block_hash)
{
// see republish_vote documentation for an explanation of these rules
auto transaction (node.store.tx_begin_read ());
auto replay (false);
auto supply (node.online_reps.online_stake ());
auto weight (node.ledger.weight (transaction, rep));
auto should_process (false);
if (node.network_params.is_test_network () || weight > supply / 1000) // 0.1% or above
{
unsigned int cooldown;
if (weight < supply / 100) // 0.1% to 1%
{
cooldown = 15;
}
else if (weight < supply / 20) // 1% to 5%
{
cooldown = 5;
}
else // 5% or above
{
cooldown = 1;
}
auto last_vote_it (last_votes.find (rep));
if (last_vote_it == last_votes.end ())
{
should_process = true;
}
else
{
auto last_vote (last_vote_it->second);
if (last_vote.sequence < sequence || (last_vote.sequence == sequence && last_vote.hash < block_hash))
{
if (last_vote.time <= std::chrono::steady_clock::now () - std::chrono::seconds (cooldown))
{
should_process = true;
}
}
else
{
replay = true;
}
}
if (should_process)
{
last_votes[rep] = { std::chrono::steady_clock::now (), sequence, block_hash };
if (!confirmed)
{
confirm_if_quorum (transaction);
}
}
}
return nano::election_vote_result (replay, should_process);
}
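// Returns true when the block is invalid: an unknown previous, or a signature that doesn't verify against the account derived from its previous/root (or the epoch signer)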
bool nano::node::validate_block_by_previous (nano::transaction const & transaction, std::shared_ptr<nano::block> block_a)
{
bool result (false);
nano::account account;
if (!block_a->previous ().is_zero ())
{
if (store.block_exists (transaction, block_a->previous ()))
{
account = ledger.account (transaction, block_a->previous ());
}
else
{
result = true;
}
}
else
{
account = block_a->root ();
}
if (!result && block_a->type () == nano::block_type::state)
{
std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a));
nano::amount prev_balance (0);
if (!block_l->hashables.previous.is_zero ())
{
if (store.block_exists (transaction, block_l->hashables.previous))
{
prev_balance = ledger.balance (transaction, block_l->hashables.previous);
}
else
{
result = true;
}
}
if (!result)
{
if (block_l->hashables.balance == prev_balance && !ledger.epoch_link.is_zero () && ledger.is_epoch_link (block_l->hashables.link))
{
account = ledger.epoch_signer;
}
}
}
if (!result && (account.is_zero () || nano::validate_message (account, block_a->hash (), block_a->block_signature ())))
{
result = true;
}
return result;
}
bool nano::election::publish (std::shared_ptr<nano::block> block_a)
{
auto result (false);
if (blocks.size () >= 10)
{
if (last_tally[block_a->hash ()] < node.online_reps.online_stake () / 10)
{
result = true;
}
}
if (!result)
{
auto transaction (node.store.tx_begin_read ());
result = node.validate_block_by_previous (transaction, block_a);
if (!result)
{
if (blocks.find (block_a->hash ()) == blocks.end ())
{
blocks.insert (std::make_pair (block_a->hash (), block_a));
confirm_if_quorum (transaction);
node.network.flood_block (block_a);
}
}
}
return result;
}
size_t nano::election::last_votes_size ()
{
std::lock_guard<std::mutex> lock (node.active.mutex);
return last_votes.size ();
}
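// Periodically walk account frontiers and start elections for accounts whose confirmation height lags behind their block count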
void nano::active_transactions::confirm_frontiers (nano::transaction const & transaction_a)
{
// Limit maximum count of elections to start
bool representative (node.config.enable_voting && node.wallets.reps_count > 0);
/* Check less frequently for non-representative nodes:
~15 minutes for non-representatives, 3 minutes for representatives */
int representative_factor = representative ? 3 * 60 : 15 * 60;
// Decrease check time for test network
int test_network_factor = node.network_params.is_test_network () ? 1000 : 1;
if (std::chrono::steady_clock::now () >= next_frontier_check)
{
size_t max_elections (max_broadcast_queue / 4);
size_t elections_count (0);
for (auto i (node.store.latest_begin (transaction_a, next_frontier_account)), n (node.store.latest_end ()); i != n && elections_count < max_elections; ++i)
{
nano::account_info info (i->second);
if (info.block_count != info.confirmation_height)
{
auto block (node.store.block_get (transaction_a, info.head));
if (!start (block))
{
++elections_count;
// Calculate votes for local representatives
if (representative)
{
node.block_processor.generator.add (block->hash ());
}
}
// Update next account
next_frontier_account = i->first.number () + 1;
}
}
// Check 4 times less frequently once all frontiers have been confirmed
int fully_confirmed_factor = (elections_count <= max_elections) ? 4 : 1;
// Calculate next check time
next_frontier_check = std::chrono::steady_clock::now () + std::chrono::seconds ((representative_factor * fully_confirmed_factor) / test_network_factor);
// Set next account to 0 if all frontiers were confirmed
next_frontier_account = (elections_count <= max_elections) ? 0 : next_frontier_account;
}
}
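// Register this election's winner as a dependent of any active elections on its previous, source or link blocks; the active mutex must already be held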
void nano::election::update_dependent ()
{
assert (!node.active.mutex.try_lock ());
std::vector<nano::block_hash> blocks_search;
auto hash (status.winner->hash ());
auto previous (status.winner->previous ());
if (!previous.is_zero ())
{
blocks_search.push_back (previous);
}
auto source (status.winner->source ());
if (!source.is_zero () && source != previous)
{
blocks_search.push_back (source);
}
auto link (status.winner->link ());
if (!link.is_zero () && !node.ledger.is_epoch_link (link) && link != previous)
{
blocks_search.push_back (link);
}
for (auto & block_search : blocks_search)
{
auto existing (node.active.blocks.find (block_search));
if (existing != node.active.blocks.end () && !existing->second->confirmed && !existing->second->stopped)
{
if (existing->second->dependent_blocks.find (hash) == existing->second->dependent_blocks.end ())
{
existing->second->dependent_blocks.insert (hash);
}
}
}
}
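// One pass of the election loop: retire finished elections, rebroadcast winners, batch confirm_req messages to representatives, and escalate long-unconfirmed elections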
void nano::active_transactions::request_confirm (std::unique_lock<std::mutex> & lock_a)
{
std::unordered_set<nano::uint512_union> inactive;
auto transaction (node.store.tx_begin_read ());
unsigned unconfirmed_count (0);
unsigned unconfirmed_announcements (0);
std::unordered_map<std::shared_ptr<nano::transport::channel>, std::vector<std::pair<nano::block_hash, nano::block_hash>>> requests_bundle;
std::deque<std::shared_ptr<nano::block>> rebroadcast_bundle;
std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<std::shared_ptr<nano::transport::channel>>>>> confirm_req_bundle;
auto roots_size (roots.size ());
for (auto i (roots.get<1> ().begin ()), n (roots.get<1> ().end ()); i != n; ++i)
{
auto root (i->root);
auto election_l (i->election);
if ((election_l->confirmed || election_l->stopped) && election_l->announcements >= announcement_min - 1)
{
if (election_l->confirmed)
{
confirmed.push_back (election_l->status);
if (confirmed.size () > election_history_size)
{
confirmed.pop_front ();
}
}
inactive.insert (root);
}
else
{
if (election_l->announcements > announcement_long)
{
++unconfirmed_count;
unconfirmed_announcements += election_l->announcements;
// Log votes for very long unconfirmed elections
if (election_l->announcements % 50 == 1)
{
auto tally_l (election_l->tally (transaction));
election_l->log_votes (tally_l);
}
/* Escalation for long unconfirmed elections:
start new elections for the previous block & source
if there are fewer than 100 active elections */
if (election_l->announcements % announcement_long == 1 && roots_size < 100 && !node.network_params.is_test_network ())
{
std::shared_ptr<nano::block> previous;
auto previous_hash (election_l->status.winner->previous ());
if (!previous_hash.is_zero ())
{
previous = node.store.block_get (transaction, previous_hash);
if (previous != nullptr)
{
add (std::move (previous));
}
}
/* If the previous block doesn't exist or isn't committed yet, block_source can segfault for state blocks,
so the source check is only performed when previous != nullptr or previous is 0 (open account) */
if (previous_hash.is_zero () || previous != nullptr)
{
auto source_hash (node.ledger.block_source (transaction, *election_l->status.winner));
if (!source_hash.is_zero ())
{
auto source (node.store.block_get (transaction, source_hash));
if (source != nullptr)
{
add (std::move (source));
}
}
}
election_l->update_dependent ();
}
}
if (election_l->announcements < announcement_long || election_l->announcements % announcement_long == 1)
{
if (node.ledger.could_fit (transaction, *election_l->status.winner))
{
// Broadcast winner
if (rebroadcast_bundle.size () < max_broadcast_queue)
{
rebroadcast_bundle.push_back (election_l->status.winner);
}
}
else
{
if (election_l->announcements != 0)
{
election_l->stop ();
}
}
}
if (election_l->announcements % 4 == 1)
{
auto rep_channels (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> ());
auto reps (node.rep_crawler.representatives (std::numeric_limits<size_t>::max ()));
// Add all rep endpoints that haven't already voted. We use a set since multiple
// reps may exist on an endpoint.
std::unordered_set<std::shared_ptr<nano::transport::channel>> channels;
for (auto & rep : reps)
{
if (election_l->last_votes.find (rep.account) == election_l->last_votes.end ())
{
channels.insert (rep.channel);
if (node.config.logging.vote_logging ())
{
node.logger.try_log ("Representative did not respond to confirm_req, retrying: ", rep.account.to_account ());
}
}
}
rep_channels->insert (rep_channels->end (), channels.begin (), channels.end ());
if ((!rep_channels->empty () && node.rep_crawler.total_weight () > node.config.online_weight_minimum.number ()) || roots_size > 5)
{
// broadcast_confirm_req_base modifies reps, so we clone it once to avoid aliasing
if (!node.network_params.is_test_network ())
{
if (confirm_req_bundle.size () < max_broadcast_queue)
{
confirm_req_bundle.push_back (std::make_pair (election_l->status.winner, rep_channels));
}
}
else
{
for (auto & rep : *rep_channels)
{
auto rep_request (requests_bundle.find (rep));
auto block (election_l->status.winner);
auto root_hash (std::make_pair (block->hash (), block->root ()));
if (rep_request == requests_bundle.end ())
{
if (requests_bundle.size () < max_broadcast_queue)
{
std::vector<std::pair<nano::block_hash, nano::block_hash>> insert_vector = { root_hash };
requests_bundle.insert (std::make_pair (rep, insert_vector));
}
}
else if (rep_request->second.size () < max_broadcast_queue * nano::network::confirm_req_hashes_max)
{
rep_request->second.push_back (root_hash);
}
}
}
}
else
{
if (!node.network_params.is_test_network ())
{
auto deque_l (node.network.udp_channels.random_set (100));
auto vec (std::make_shared<std::vector<std::shared_ptr<nano::transport::channel>>> ());
for (auto i : deque_l)
{
vec->push_back (i);
}
confirm_req_bundle.push_back (std::make_pair (election_l->status.winner, vec));
}
else
{
for (auto & rep : *rep_channels)
{
auto rep_request (requests_bundle.find (rep));
auto block (election_l->status.winner);
auto root_hash (std::make_pair (block->hash (), block->root ()));
if (rep_request == requests_bundle.end ())
{
std::vector<std::pair<nano::block_hash, nano::block_hash>> insert_vector = { root_hash };
requests_bundle.insert (std::make_pair (rep, insert_vector));
}
else
{
rep_request->second.push_back (root_hash);
}
}
}
}
}
}
++election_l->announcements;
}
lock_a.unlock ();
// Rebroadcast unconfirmed blocks
if (!rebroadcast_bundle.empty ())
{
node.network.flood_block_batch (rebroadcast_bundle);
}
// Batch confirmation request
if (!node.network_params.is_live_network () && !requests_bundle.empty ())
{
node.network.broadcast_confirm_req_batch (requests_bundle, 50);
}
//confirm_req broadcast
if (!confirm_req_bundle.empty ())
{
node.network.broadcast_confirm_req_batch (confirm_req_bundle);
}
// Confirm frontiers
confirm_frontiers (transaction);
lock_a.lock ();
// Erase inactive elections
for (auto i (inactive.begin ()), n (inactive.end ()); i != n; ++i)
{
auto root_it (roots.find (*i));
assert (root_it != roots.end ());
for (auto & block : root_it->election->blocks)
{
auto erased (blocks.erase (block.first));
(void)erased;
assert (erased == 1);
}
for (auto & dependent_block : root_it->election->dependent_blocks)
{
adjust_difficulty (dependent_block);
}
roots.erase (*i);
}
if (unconfirmed_count > 0)
{
node.logger.try_log (boost::str (boost::format ("%1% blocks have been unconfirmed averaging %2% announcements") % unconfirmed_count % (unconfirmed_announcements / unconfirmed_count)));
}
}
void nano::active_transactions::request_loop ()
{
std::unique_lock<std::mutex> lock (mutex);
started = true;
lock.unlock ();
condition.notify_all ();
lock.lock ();
while (!stopped)
{
request_confirm (lock);
const auto extra_delay (std::min (roots.size (), max_broadcast_queue) * node.network.broadcast_interval_ms * 2);
condition.wait_for (lock, std::chrono::milliseconds (node.network_params.request_interval_ms + extra_delay));
}
}
void nano::active_transactions::stop ()
{
std::unique_lock<std::mutex> lock (mutex);
while (!started)
{
condition.wait (lock);
}
stopped = true;
lock.unlock ();
condition.notify_all ();
if (thread.joinable ())
{
thread.join ();
}
lock.lock ();
roots.clear ();
}
bool nano::active_transactions::start (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a)
{
std::lock_guard<std::mutex> lock (mutex);
return add (block_a, confirmation_action_a);
}
bool nano::active_transactions::add (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a)
{
auto error (true);
if (!stopped)
{
auto root (nano::uint512_union (block_a->previous (), block_a->root ()));
auto existing (roots.find (root));
if (existing == roots.end ())
{
auto election (std::make_shared<nano::election> (node, block_a, confirmation_action_a));
uint64_t difficulty (0);
auto error (nano::work_validate (*block_a, &difficulty));
release_assert (!error);
roots.insert (nano::conflict_info{ root, difficulty, difficulty, election });
blocks.insert (std::make_pair (block_a->hash (), election));
adjust_difficulty (block_a->hash ());
}
error = existing != roots.end ();
}
return error;
}
// Validate a vote and apply it to the current election if one exists
bool nano::active_transactions::vote (std::shared_ptr<nano::vote> vote_a, bool single_lock)
{
std::shared_ptr<nano::election> election;
bool replay (false);
bool processed (false);
{
std::unique_lock<std::mutex> lock;
if (!single_lock)
{
lock = std::unique_lock<std::mutex> (mutex);
}
for (auto vote_block : vote_a->blocks)
{
nano::election_vote_result result;
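			// Each entry in a vote is either a bare block hash or a full block; look up the corresponding election accordingly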
if (vote_block.which ())
{
auto block_hash (boost::get<nano::block_hash> (vote_block));
auto existing (blocks.find (block_hash));
if (existing != blocks.end ())
{
result = existing->second->vote (vote_a->account, vote_a->sequence, block_hash);
}
}
else
{
auto block (boost::get<std::shared_ptr<nano::block>> (vote_block));
auto existing (roots.find (nano::uint512_union (block->previous (), block->root ())));
if (existing != roots.end ())
{
result = existing->election->vote (vote_a->account, vote_a->sequence, block->hash ());
}
}
replay = replay || result.replay;
processed = processed || result.processed;
}
}
if (processed)
{
node.network.flood_vote (vote_a);
}
return replay;
}
bool nano::active_transactions::active (nano::block const & block_a)
{
std::lock_guard<std::mutex> lock (mutex);
return roots.find (nano::uint512_union (block_a.previous (), block_a.root ())) != roots.end ();
}
void nano::active_transactions::update_difficulty (nano::block const & block_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto existing (roots.find (nano::uint512_union (block_a.previous (), block_a.root ())));
if (existing != roots.end ())
{
uint64_t difficulty;
auto error (nano::work_validate (block_a, &difficulty));
assert (!error);
if (difficulty > existing->difficulty)
{
roots.modify (existing, [difficulty](nano::conflict_info & info_a) {
info_a.difficulty = difficulty;
});
adjust_difficulty (block_a.hash ());
}
}
}
void nano::active_transactions::adjust_difficulty (nano::block_hash const & hash_a)
{
assert (!mutex.try_lock ());
std::deque<std::pair<nano::block_hash, int64_t>> remaining_blocks;
remaining_blocks.emplace_back (hash_a, 0);
std::unordered_set<nano::block_hash> processed_blocks;
std::vector<std::pair<nano::uint512_union, int64_t>> elections_list;
uint128_t sum (0);
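	// Breadth-first walk over the winner's related blocks (previous, source, link) and its dependents, summing the difficulty of every affected election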
while (!remaining_blocks.empty ())
{
auto const & item (remaining_blocks.front ());
auto hash (item.first);
auto level (item.second);
if (processed_blocks.find (hash) == processed_blocks.end ())
{
auto existing (blocks.find (hash));
if (existing != blocks.end () && !existing->second->confirmed && !existing->second->stopped && existing->second->status.winner->hash () == hash)
{
auto previous (existing->second->status.winner->previous ());
if (!previous.is_zero ())
{
remaining_blocks.emplace_back (previous, level + 1);
}
auto source (existing->second->status.winner->source ());
if (!source.is_zero () && source != previous)
{
remaining_blocks.emplace_back (source, level + 1);
}
auto link (existing->second->status.winner->link ());
if (!link.is_zero () && !node.ledger.is_epoch_link (link) && link != previous)
{
remaining_blocks.emplace_back (link, level + 1);
}
for (auto & dependent_block : existing->second->dependent_blocks)
{
remaining_blocks.emplace_back (dependent_block, level - 1);
}
processed_blocks.insert (hash);
nano::uint512_union root (previous, existing->second->status.winner->root ());
auto existing_root (roots.find (root));
if (existing_root != roots.end ())
{
sum += existing_root->difficulty;
elections_list.emplace_back (root, level);
}
}
}
remaining_blocks.pop_front ();
}
if (elections_list.size () > 1)
{
uint64_t average (static_cast<uint64_t> (sum / elections_list.size ()));
// Potential overflow check
uint64_t divider (1);
if (elections_list.size () > 1000000 && (average - node.network_params.publish_threshold) > elections_list.size ())
{
divider = ((average - node.network_params.publish_threshold) / elections_list.size ()) + 1;
}
// Set adjusted difficulty
for (auto & item : elections_list)
{
auto existing_root (roots.find (item.first));
uint64_t difficulty_a (average + (item.second / divider));
roots.modify (existing_root, [difficulty_a](nano::conflict_info & info_a) {
info_a.adjusted_difficulty = difficulty_a;
});
}
}
	// Set adjusted difficulty equal to difficulty
else if (elections_list.size () == 1)
{
auto existing_root (roots.find (elections_list.begin ()->first));
if (existing_root->difficulty != existing_root->adjusted_difficulty)
{
roots.modify (existing_root, [](nano::conflict_info & info_a) {
info_a.adjusted_difficulty = info_a.difficulty;
});
}
}
}
// List of active blocks in elections
std::deque<std::shared_ptr<nano::block>> nano::active_transactions::list_blocks (bool single_lock)
{
std::deque<std::shared_ptr<nano::block>> result;
std::unique_lock<std::mutex> lock;
if (!single_lock)
{
lock = std::unique_lock<std::mutex> (mutex);
}
for (auto i (roots.begin ()), n (roots.end ()); i != n; ++i)
{
result.push_back (i->election->status.winner);
}
return result;
}
std::deque<nano::election_status> nano::active_transactions::list_confirmed ()
{
std::lock_guard<std::mutex> lock (mutex);
return confirmed;
}
void nano::active_transactions::erase (nano::block const & block_a)
{
std::lock_guard<std::mutex> lock (mutex);
if (roots.find (nano::uint512_union (block_a.previous (), block_a.root ())) != roots.end ())
{
roots.erase (nano::uint512_union (block_a.previous (), block_a.root ()));
		node.logger.try_log (boost::str (boost::format ("Election erased for block %1% root %2%") % block_a.hash ().to_string () % block_a.root ().to_string ()));
}
}
bool nano::active_transactions::empty ()
{
std::lock_guard<std::mutex> lock (mutex);
return roots.empty ();
}
size_t nano::active_transactions::size ()
{
std::lock_guard<std::mutex> lock (mutex);
return roots.size ();
}
nano::active_transactions::active_transactions (nano::node & node_a) :
node (node_a),
started (false),
stopped (false),
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::request_loop);
request_loop ();
})
{
std::unique_lock<std::mutex> lock (mutex);
while (!started)
{
condition.wait (lock);
}
}
nano::active_transactions::~active_transactions ()
{
stop ();
}
bool nano::active_transactions::publish (std::shared_ptr<nano::block> block_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto existing (roots.find (nano::uint512_union (block_a->previous (), block_a->root ())));
auto result (true);
if (existing != roots.end ())
{
result = existing->election->publish (block_a);
if (!result)
{
blocks.insert (std::make_pair (block_a->hash (), existing->election));
}
}
return result;
}
void nano::active_transactions::confirm_block (nano::block_hash const & hash_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto existing (blocks.find (hash_a));
if (existing != blocks.end () && !existing->second->confirmed && !existing->second->stopped && existing->second->status.winner->hash () == hash_a)
{
existing->second->confirm_once ();
}
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (active_transactions & active_transactions, const std::string & name)
{
size_t roots_count = 0;
size_t blocks_count = 0;
size_t confirmed_count = 0;
{
std::lock_guard<std::mutex> guard (active_transactions.mutex);
roots_count = active_transactions.roots.size ();
blocks_count = active_transactions.blocks.size ();
confirmed_count = active_transactions.confirmed.size ();
}
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "roots", roots_count, sizeof (decltype (active_transactions.roots)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks", blocks_count, sizeof (decltype (active_transactions.blocks)::value_type) }));
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "confirmed", confirmed_count, sizeof (decltype (active_transactions.confirmed)::value_type) }));
return composite;
}
}
/**
 * For all the blocks below this height which have been implicitly confirmed, check if they
* are open/receive blocks, and if so follow the source blocks and iteratively repeat to genesis.
*/
void nano::node::add_confirmation_heights (nano::block_hash const & hash_a)
{
auto transaction (store.tx_begin_write ());
std::stack<nano::block_hash, std::vector<nano::block_hash>> open_receive_blocks;
auto current = hash_a;
nano::genesis genesis;
do
{
if (!open_receive_blocks.empty ())
{
current = open_receive_blocks.top ();
open_receive_blocks.pop ();
}
auto hash (current);
auto block_height (store.block_account_height (transaction, hash));
assert (block_height >= 0);
nano::account_info account_info;
nano::account account (ledger.account (transaction, hash));
release_assert (!store.account_get (transaction, account, account_info));
auto confirmation_height = account_info.confirmation_height;
if (block_height > confirmation_height)
{
account_info.confirmation_height = block_height;
store.account_put (transaction, account, account_info);
			// Get the difference and check if any of these are receive blocks
auto num_confirmed_blocks = block_height - confirmation_height;
// Start from the most recent one and work our way through
for (uint64_t i = 0; i < num_confirmed_blocks && !current.is_zero (); ++i)
{
auto block (store.block_get (transaction, current));
if (block != nullptr)
{
// Confirm blocks back
active.confirm_block (current);
// First check legacy receive/open
if (block->type () == nano::block_type::receive || (block->type () == nano::block_type::open && current != genesis.hash ()))
{
open_receive_blocks.push (block->source ());
}
else
{
// Then check state blocks
auto state = std::dynamic_pointer_cast<nano::state_block> (block);
if (state != nullptr)
{
nano::block_hash previous (state->hashables.previous);
if (!previous.is_zero ())
{
if (state->hashables.balance.number () >= ledger.balance (transaction, previous) && !state->hashables.link.is_zero () && !ledger.is_epoch_link (state->hashables.link))
{
open_receive_blocks.push (state->hashables.link);
}
}
// State open blocks are always receive or epoch
else if (!ledger.is_epoch_link (state->hashables.link))
{
open_receive_blocks.push (state->hashables.link);
}
}
}
current = block->previous ();
}
}
}
} while (!open_receive_blocks.empty ());
}
int nano::node::store_version ()
{
auto transaction (store.tx_begin_read ());
return store.version_get (transaction);
}
nano::thread_runner::thread_runner (boost::asio::io_context & io_ctx_a, unsigned service_threads_a)
{
boost::thread::attributes attrs;
nano::thread_attributes::set (attrs);
for (auto i (0u); i < service_threads_a; ++i)
{
threads.push_back (boost::thread (attrs, [&io_ctx_a]() {
nano::thread_role::set (nano::thread_role::name::io);
try
{
io_ctx_a.run ();
}
catch (...)
{
#ifndef NDEBUG
/*
* In a release build, catch and swallow the
* io_context exception, in debug mode pass it
* on
*/
throw;
#endif
}
}));
}
}
nano::thread_runner::~thread_runner ()
{
join ();
}
void nano::thread_runner::join ()
{
for (auto & i : threads)
{
if (i.joinable ())
{
i.join ();
}
}
}
nano::inactive_node::inactive_node (boost::filesystem::path const & path_a, uint16_t peering_port_a) :
path (path_a),
io_context (std::make_shared<boost::asio::io_context> ()),
alarm (*io_context),
work (1, nullptr),
peering_port (peering_port_a)
{
boost::system::error_code error_chmod;
/*
* @warning May throw a filesystem exception
*/
boost::filesystem::create_directories (path);
nano::set_secure_perm_directory (path, error_chmod);
logging.max_size = std::numeric_limits<std::uintmax_t>::max ();
logging.init (path);
node = std::make_shared<nano::node> (init, *io_context, peering_port, path, alarm, logging, work);
}
nano::inactive_node::~inactive_node ()
{
node->stop ();
}
nano::message_buffer_manager::message_buffer_manager (nano::stat & stats_a, size_t size, size_t count) :
stats (stats_a),
free (count),
full (count),
slab (size * count),
entries (count),
stopped (false)
{
assert (count > 0);
assert (size > 0);
auto slab_data (slab.data ());
auto entry_data (entries.data ());
for (auto i (0); i < count; ++i, ++entry_data)
{
*entry_data = { slab_data + i * size, 0, nano::endpoint () };
free.push_back (entry_data);
}
}
nano::message_buffer * nano::message_buffer_manager::allocate ()
{
std::unique_lock<std::mutex> lock (mutex);
while (!stopped && free.empty () && full.empty ())
{
stats.inc (nano::stat::type::udp, nano::stat::detail::blocking, nano::stat::dir::in);
condition.wait (lock);
}
nano::message_buffer * result (nullptr);
if (!free.empty ())
{
result = free.front ();
free.pop_front ();
}
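	// No free buffer is available; reuse the oldest full buffer and count it as an overflow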
if (result == nullptr && !full.empty ())
{
result = full.front ();
full.pop_front ();
stats.inc (nano::stat::type::udp, nano::stat::detail::overflow, nano::stat::dir::in);
}
release_assert (result || stopped);
return result;
}
void nano::message_buffer_manager::enqueue (nano::message_buffer * data_a)
{
assert (data_a != nullptr);
{
std::lock_guard<std::mutex> lock (mutex);
full.push_back (data_a);
}
condition.notify_all ();
}
nano::message_buffer * nano::message_buffer_manager::dequeue ()
{
std::unique_lock<std::mutex> lock (mutex);
while (!stopped && full.empty ())
{
condition.wait (lock);
}
nano::message_buffer * result (nullptr);
if (!full.empty ())
{
result = full.front ();
full.pop_front ();
}
return result;
}
void nano::message_buffer_manager::release (nano::message_buffer * data_a)
{
assert (data_a != nullptr);
{
std::lock_guard<std::mutex> lock (mutex);
free.push_back (data_a);
}
condition.notify_all ();
}
void nano::message_buffer_manager::stop ()
{
{
std::lock_guard<std::mutex> lock (mutex);
stopped = true;
}
condition.notify_all ();
}
| 1 | 15,134 | The bind address should be configurable. | nanocurrency-nano-node | cpp |
@@ -68,6 +68,11 @@ class LocalRepository implements Repository {
return fetchCategories(categoryDirectories);
}
+ @Override
+ public void delete() {
+ // nothing to do
+ }
+
private List<CategoryDTO> fetchCategories(File[] categoryDirectories) {
final List<CategoryDTO> results = new ArrayList<>();
| 1 | /*
* Copyright (C) 2015-2017 PÂRIS Quentin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.phoenicis.apps;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.phoenicis.apps.dto.ApplicationDTO;
import org.phoenicis.apps.dto.CategoryDTO;
import org.phoenicis.apps.dto.ResourceDTO;
import org.phoenicis.apps.dto.ScriptDTO;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
class LocalRepository implements Repository {
private final static Logger LOGGER = LoggerFactory.getLogger(LocalRepository.class);
private static final String CATEGORY_ICON_NAME = "icon.png";
private final String repositoryDirectory;
private final ObjectMapper objectMapper;
private final String repositorySource;
private LocalRepository(String repositoryDirectory, String repositorySource, ObjectMapper objectMapper) {
this.repositoryDirectory = repositoryDirectory;
this.objectMapper = objectMapper;
this.repositorySource = repositorySource;
}
private LocalRepository(String repositoryDirectory, ObjectMapper objectMapper) {
this(repositoryDirectory, repositoryDirectory, objectMapper);
}
@Override
public List<CategoryDTO> fetchInstallableApplications() {
final File repositoryDirectoryFile = new File(repositoryDirectory);
final File[] categoryDirectories = repositoryDirectoryFile.listFiles();
if (categoryDirectories == null) {
return Collections.emptyList();
}
LOGGER.info("Reading directory : " + repositoryDirectory);
return fetchCategories(categoryDirectories);
}
private List<CategoryDTO> fetchCategories(File[] categoryDirectories) {
final List<CategoryDTO> results = new ArrayList<>();
for (File categoryDirectory : categoryDirectories) {
if (categoryDirectory.isDirectory() && !categoryDirectory.getName().startsWith(".")) {
final File categoryFile = new File(categoryDirectory, "category.json");
final CategoryDTO.Builder categoryDTOBuilder = new CategoryDTO.Builder(unSerializeCategory(categoryFile))
.withName(categoryDirectory.getName())
.withApplications(fetchApplications(categoryDirectory));
final File categoryIconFile = new File(categoryDirectory, CATEGORY_ICON_NAME);
if (categoryIconFile.exists()) {
categoryDTOBuilder.withIcon("file:///" + categoryIconFile.getAbsolutePath());
}
CategoryDTO category = categoryDTOBuilder.build();
results.add(category);
}
}
Collections.sort(results, Comparator.comparing(CategoryDTO::getName));
return results;
}
private List<ApplicationDTO> fetchApplications(File categoryDirectory) {
final File[] applicationDirectories = categoryDirectory.listFiles();
if (applicationDirectories == null) {
return Collections.emptyList();
}
final List<ApplicationDTO> results = new ArrayList<>();
for (File applicationDirectory : applicationDirectories) {
if (applicationDirectory.isDirectory()) {
final ApplicationDTO.Builder applicationDTOBuilder = new ApplicationDTO.Builder(
unSerializeApplication(new File(applicationDirectory, "application.json")));
if (StringUtils.isBlank(applicationDTOBuilder.getName())) {
applicationDTOBuilder.withName(applicationDirectory.getName());
}
final File miniaturesDirectory = new File(applicationDirectory, "miniatures");
if (miniaturesDirectory.exists() && miniaturesDirectory.isDirectory()) {
try {
applicationDTOBuilder.withMiniatures(fetchMiniatures(miniaturesDirectory));
} catch (IOException e) {
LOGGER.warn("Unable to read miniatures", e);
}
}
applicationDTOBuilder.withScripts(fetchScripts(applicationDirectory));
applicationDTOBuilder.withResources(fetchResources(applicationDirectory));
ApplicationDTO app = applicationDTOBuilder.build();
results.add(app);
}
}
Collections.sort(results, Comparator.comparing(ApplicationDTO::getName));
return results;
}
private List<byte[]> fetchMiniatures(File miniaturesDirectory) throws IOException {
final List<byte[]> miniatures = new ArrayList<>();
final File[] miniatureFiles = miniaturesDirectory.listFiles();
if (miniatureFiles != null) {
for (File miniatureFile : miniatureFiles) {
if (!miniatureFile.isDirectory() && !miniatureFile.getName().startsWith(".")) {
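                    // "main.png" is inserted first so it is used as the primary miniature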
if ("main.png".equals(miniatureFile.getName())) {
miniatures.add(0, IOUtils.toByteArray(new FileInputStream(miniatureFile)));
} else {
miniatures.add(IOUtils.toByteArray(new FileInputStream(miniatureFile)));
}
}
}
}
return miniatures;
}
private List<ResourceDTO> fetchResources(File applicationDirectory) {
final File[] resources = new File(applicationDirectory, "resources").listFiles();
if (resources == null) {
return Collections.emptyList();
}
final List<ResourceDTO> results = new ArrayList<>();
for (File resourceFile : resources) {
            if (!resourceFile.isDirectory() && !resourceFile.getName().startsWith(".")) {
try {
results.add(new ResourceDTO(resourceFile.getName(), IOUtils.toByteArray(new FileInputStream(resourceFile))));
} catch (IOException ignored) {
}
}
}
return results;
}
private List<ScriptDTO> fetchScripts(File applicationDirectory) {
final File[] scriptDirectories = applicationDirectory.listFiles();
if (scriptDirectories == null) {
return Collections.emptyList();
}
final List<ScriptDTO> results = new ArrayList<>();
for (File scriptDirectory : scriptDirectories) {
if (scriptDirectory.isDirectory()
&& !"miniatures".equals(scriptDirectory.getName())
&& !"resources".equals(scriptDirectory.getName())) {
final ScriptDTO.Builder scriptDTOBuilder = new ScriptDTO.Builder(
unSerializeScript(new File(scriptDirectory, "script.json")));
scriptDTOBuilder.withScriptSource(repositorySource);
if (StringUtils.isBlank(scriptDTOBuilder.getScriptName())) {
scriptDTOBuilder.withScriptName(scriptDirectory.getName());
}
final File scriptFile = new File(scriptDirectory, "script.js");
if (scriptFile.exists()) {
try {
scriptDTOBuilder.withScript(
new String(IOUtils.toByteArray(new FileInputStream(scriptFile)))
);
} catch (IOException e) {
LOGGER.warn("Script not found", e);
}
}
results.add(scriptDTOBuilder.build());
}
}
return results;
}
private CategoryDTO unSerializeCategory(File jsonFile) {
try {
return objectMapper.readValue(jsonFile, CategoryDTO.class);
} catch (IOException e) {
LOGGER.debug("JSON file not found", e);
return new CategoryDTO.Builder().build();
}
}
private ScriptDTO unSerializeScript(File jsonFile) {
try {
return objectMapper.readValue(jsonFile, ScriptDTO.class);
} catch (IOException e) {
LOGGER.debug("JSON file not found");
return new ScriptDTO.Builder().build();
}
}
private ApplicationDTO unSerializeApplication(File jsonFile) {
try {
return objectMapper.readValue(jsonFile, ApplicationDTO.class);
} catch (IOException e) {
LOGGER.debug("JSON file not found", e);
return new ApplicationDTO.Builder().build();
}
}
static class Factory {
private final ObjectMapper objectMapper;
Factory(ObjectMapper objectMapper) {
this.objectMapper = objectMapper;
}
public LocalRepository createInstance(String path) {
return new LocalRepository(path, objectMapper);
}
public LocalRepository createInstance(String path, String source) {
return new LocalRepository(path, source, objectMapper);
}
}
}
| 1 | 9,328 | Maybe the empty `delete()` should be implemented as default in the interface. | PhoenicisOrg-phoenicis | java |
@@ -24,7 +24,7 @@ class HDF5OutputLayerTest : public ::testing::Test {
protected:
HDF5OutputLayerTest()
: output_file_name_(tmpnam(NULL)),
- input_file_name_("src/caffe/test/test_data/sample_data.h5"),
+ input_file_name_(CMAKE_SOURCE_DIR "caffe/test/test_data/sample_data.h5"),
blob_data_(new Blob<Dtype>()),
blob_label_(new Blob<Dtype>()),
num_(5), | 1 | // Copyright 2014 BVLC and contributors.
#include <cuda_runtime.h>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/util/io.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/test/test_caffe_main.hpp"
using std::string;
using std::vector;
namespace caffe {
extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
template <typename Dtype>
class HDF5OutputLayerTest : public ::testing::Test {
protected:
HDF5OutputLayerTest()
: output_file_name_(tmpnam(NULL)),
input_file_name_("src/caffe/test/test_data/sample_data.h5"),
blob_data_(new Blob<Dtype>()),
blob_label_(new Blob<Dtype>()),
num_(5),
channels_(8),
height_(5),
width_(5) {}
virtual ~HDF5OutputLayerTest() {
delete blob_data_;
delete blob_label_;
}
void CheckBlobEqual(const Blob<Dtype>& b1, const Blob<Dtype>& b2);
string output_file_name_;
string input_file_name_;
Blob<Dtype>* const blob_data_;
Blob<Dtype>* const blob_label_;
vector<Blob<Dtype>*> blob_bottom_vec_;
vector<Blob<Dtype>*> blob_top_vec_;
int num_;
int channels_;
int height_;
int width_;
};
template <typename Dtype>
void HDF5OutputLayerTest<Dtype>::CheckBlobEqual(
const Blob<Dtype>& b1, const Blob<Dtype>& b2) {
EXPECT_EQ(b1.num(), b2.num());
EXPECT_EQ(b1.channels(), b2.channels());
EXPECT_EQ(b1.height(), b2.height());
EXPECT_EQ(b1.width(), b2.width());
for (int n = 0; n < b1.num(); ++n) {
for (int c = 0; c < b1.channels(); ++c) {
for (int h = 0; h < b1.height(); ++h) {
for (int w = 0; w < b1.width(); ++w) {
          EXPECT_EQ(b1.data_at(n, c, h, w), b2.data_at(n, c, h, w));
}
}
}
}
}
typedef ::testing::Types<float, double> Dtypes;
TYPED_TEST_CASE(HDF5OutputLayerTest, Dtypes);
TYPED_TEST(HDF5OutputLayerTest, TestForward) {
LOG(INFO) << "Loading HDF5 file " << this->input_file_name_;
hid_t file_id = H5Fopen(this->input_file_name_.c_str(), H5F_ACC_RDONLY,
H5P_DEFAULT);
ASSERT_GE(file_id, 0) << "Failed to open HDF5 file" <<
this->input_file_name_;
hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
this->blob_data_);
hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
this->blob_label_);
herr_t status = H5Fclose(file_id);
EXPECT_GE(status, 0) << "Failed to close HDF5 file " <<
this->input_file_name_;
this->blob_bottom_vec_.push_back(this->blob_data_);
this->blob_bottom_vec_.push_back(this->blob_label_);
Caffe::Brew modes[] = { Caffe::CPU, Caffe::GPU };
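  // Exercise the forward pass in both CPU and GPU modes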
for (int m = 0; m < 2; ++m) {
Caffe::set_mode(modes[m]);
LayerParameter param;
param.mutable_hdf5_output_param()->set_file_name(this->output_file_name_);
    // This code block ensures that the layer is destructed and
// the output hdf5 file is closed.
{
HDF5OutputLayer<TypeParam> layer(param);
EXPECT_EQ(layer.file_name(), this->output_file_name_);
layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
}
hid_t file_id = H5Fopen(this->output_file_name_.c_str(), H5F_ACC_RDONLY,
H5P_DEFAULT);
ASSERT_GE(file_id, 0) << "Failed to open HDF5 file" <<
this->input_file_name_;
Blob<TypeParam>* blob_data = new Blob<TypeParam>();
hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
blob_data);
this->CheckBlobEqual(*(this->blob_data_), *blob_data);
Blob<TypeParam>* blob_label = new Blob<TypeParam>();
hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
blob_label);
this->CheckBlobEqual(*(this->blob_label_), *blob_label);
herr_t status = H5Fclose(file_id);
EXPECT_GE(status, 0) << "Failed to close HDF5 file " <<
this->output_file_name_;
}
}
} // namespace caffe
| 1 | 28,407 | How to ensure CMAKE_SOURCE_DIR is set correctly? | BVLC-caffe | cpp |
@@ -106,7 +106,7 @@ func (ws *workingSet) LoadOrCreateAccountState(addr string, init *big.Int) (*Acc
switch {
case errors.Cause(err) == ErrStateNotExist:
account := Account{
- Balance: init,
+ Balance: big.NewInt(0).SetBytes(init.Bytes()),
VotingWeight: big.NewInt(0),
}
ws.cachedStates[addrHash] = &account | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package state
import (
"context"
"fmt"
"math/big"
"sort"
"github.com/pkg/errors"
"github.com/CoderZhi/go-ethereum/core/vm"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/iotxaddress"
"github.com/iotexproject/iotex-core/logger"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/trie"
)
type (
// WorkingSet defines an interface for working set of states changes
WorkingSet interface {
// states and actions
LoadOrCreateAccountState(string, *big.Int) (*Account, error)
Nonce(string) (uint64, error) // Note that Nonce starts with 1.
CachedAccountState(string) (*Account, error)
RunActions(context.Context, uint64, []action.Action) (hash.Hash32B, map[hash.Hash32B]*action.Receipt, error)
Commit() error
// contracts
GetCodeHash(hash.PKHash) (hash.Hash32B, error)
GetCode(hash.PKHash) ([]byte, error)
SetCode(hash.PKHash, []byte) error
GetContractState(hash.PKHash, hash.Hash32B) (hash.Hash32B, error)
SetContractState(hash.PKHash, hash.Hash32B, hash.Hash32B) error
// Accounts
RootHash() hash.Hash32B
Version() uint64
Height() uint64
// General state
State(hash.PKHash, State) (State, error)
CachedState(hash.PKHash, State) (State, error)
PutState(hash.PKHash, State) error
UpdateCachedStates(hash.PKHash, *Account)
}
	// workingSet implements the WorkingSet interface; it tracks pending changes to accounts/contracts in a local cache
workingSet struct {
ver uint64
blkHeight uint64
cachedCandidates map[hash.PKHash]*Candidate
cachedStates map[hash.PKHash]State // states being modified in this block
cachedContract map[hash.PKHash]Contract // contracts being modified in this block
accountTrie trie.Trie // global account state trie
cb db.CachedBatch // cached batch for pending writes
dao db.KVStore // the underlying DB for account/contract storage
actionHandlers []ActionHandler
}
)
// NewWorkingSet creates a new working set
func NewWorkingSet(
version uint64,
kv db.KVStore,
root hash.Hash32B,
actionHandlers []ActionHandler,
) (WorkingSet, error) {
ws := &workingSet{
ver: version,
cachedCandidates: make(map[hash.PKHash]*Candidate),
cachedStates: make(map[hash.PKHash]State),
cachedContract: make(map[hash.PKHash]Contract),
cb: db.NewCachedBatch(),
dao: kv,
actionHandlers: actionHandlers,
}
tr, err := trie.NewTrieSharedBatch(ws.dao, ws.cb, trie.AccountKVNameSpace, root)
if err != nil {
return nil, errors.Wrap(err, "failed to generate state trie from config")
}
ws.accountTrie = tr
if err := ws.accountTrie.Start(context.Background()); err != nil {
return nil, errors.Wrapf(err, "failed to load state trie from root = %x", root)
}
return ws, nil
}
//======================================
// account functions
//======================================
// LoadOrCreateAccountState loads existing or adds a new account state with initial balance to the factory
// addr should be a bech32 properly-encoded string
func (ws *workingSet) LoadOrCreateAccountState(addr string, init *big.Int) (*Account, error) {
addrHash, err := iotxaddress.AddressToPKHash(addr)
if err != nil {
return nil, err
}
state, err := ws.CachedState(addrHash, &Account{})
switch {
case errors.Cause(err) == ErrStateNotExist:
account := Account{
Balance: init,
VotingWeight: big.NewInt(0),
}
ws.cachedStates[addrHash] = &account
return &account, nil
case err != nil:
return nil, errors.Wrapf(err, "failed to get account of %x from cached account", addrHash)
}
account, err := stateToAccountState(state)
if err != nil {
return nil, err
}
return account, nil
}
// Nonce returns the Nonce if the account exists
func (ws *workingSet) Nonce(addr string) (uint64, error) {
state, err := ws.accountState(addr)
if err != nil {
return 0, errors.Wrapf(err, "failed to get account state of %s", addr)
}
return state.Nonce, nil
}
// CachedAccountState returns the cached account state if the address exists in local cache
func (ws *workingSet) CachedAccountState(addr string) (*Account, error) {
addrHash, err := iotxaddress.AddressToPKHash(addr)
if err != nil {
return nil, err
}
if contract, ok := ws.cachedContract[addrHash]; ok {
return contract.SelfState(), nil
}
state, err := ws.CachedState(addrHash, &Account{})
if err != nil {
return nil, err
}
account, err := stateToAccountState(state)
if err != nil {
return nil, err
}
return account, nil
}
// RootHash returns the hash of the root node of the accountTrie
func (ws *workingSet) RootHash() hash.Hash32B {
return ws.accountTrie.RootHash()
}
// Version returns the Version of this working set
func (ws *workingSet) Version() uint64 {
return ws.ver
}
// Height returns the Height of the block being worked on
func (ws *workingSet) Height() uint64 {
return ws.blkHeight
}
// RunActions runs actions in the block and tracks pending changes in the working set
func (ws *workingSet) RunActions(
ctx context.Context,
blockHeight uint64,
actions []action.Action,
) (hash.Hash32B, map[hash.Hash32B]*action.Receipt, error) {
ws.blkHeight = blockHeight
// Recover cachedCandidates after restart factory
if blockHeight > 0 && len(ws.cachedCandidates) == 0 {
candidates, err := ws.getCandidates(blockHeight - 1)
if err != nil {
logger.Info().Err(err).Msgf("No previous Candidates on Height %d", blockHeight-1)
candidates = CandidateList{}
}
if ws.cachedCandidates, err = CandidatesToMap(candidates); err != nil {
return hash.ZeroHash32B, nil,
errors.Wrap(err, "failed to convert candidate list to map of cached Candidates")
}
}
raCtx, ok := getRunActionsCtx(ctx)
if !ok {
return hash.ZeroHash32B, nil,
errors.New("failed to get RunActionsCtx")
}
// check producer
producer, err := ws.LoadOrCreateAccountState(raCtx.ProducerAddr, big.NewInt(0))
if err != nil {
return hash.ZeroHash32B, nil, errors.Wrapf(err, "failed to load or create the account of block producer %s", raCtx.ProducerAddr)
}
tsfs, votes, executions := action.ClassifyActions(actions)
if err := ws.handleTsf(producer, tsfs, raCtx.GasLimit, raCtx.EnableGasCharge); err != nil {
return hash.ZeroHash32B, nil, errors.Wrap(err, "failed to handle transfers")
}
if err := ws.handleVote(producer, blockHeight, votes, raCtx.GasLimit, raCtx.EnableGasCharge); err != nil {
return hash.ZeroHash32B, nil, errors.Wrap(err, "failed to handle votes")
}
// update pending account changes to trie
for addr, state := range ws.cachedStates {
if err := ws.PutState(addr, state); err != nil {
return hash.ZeroHash32B, nil, errors.Wrap(err, "failed to update pending account changes to trie")
}
account, err := stateToAccountState(state)
if err != nil {
return hash.ZeroHash32B, nil, err
}
// Perform vote update operation on candidate and delegate pools
if !account.IsCandidate {
// remove the candidate if the person is not a candidate anymore
if _, ok := ws.cachedCandidates[addr]; ok {
delete(ws.cachedCandidates, addr)
}
continue
}
totalWeight := big.NewInt(0)
totalWeight.Add(totalWeight, account.VotingWeight)
voteePKHash, err := iotxaddress.AddressToPKHash(account.Votee)
if err != nil {
return hash.ZeroHash32B, nil, err
}
if addr == voteePKHash {
totalWeight.Add(totalWeight, account.Balance)
}
ws.updateCandidate(addr, totalWeight, blockHeight)
}
// update pending contract changes
for addr, contract := range ws.cachedContract {
if err := contract.Commit(); err != nil {
return hash.ZeroHash32B, nil, errors.Wrap(err, "failed to update pending contract changes")
}
state := contract.SelfState()
// store the account (with new storage trie root) into account trie
if err := ws.PutState(addr, state); err != nil {
return hash.ZeroHash32B, nil,
errors.Wrap(err, "failed to update pending contract account changes to trie")
}
}
// increase Executor's Nonce for every execution in this block
for _, e := range executions {
executorPKHash, err := iotxaddress.AddressToPKHash(e.Executor())
if err != nil {
return hash.ZeroHash32B, nil, err
}
state, err := ws.CachedState(executorPKHash, &Account{})
if err != nil {
return hash.ZeroHash32B, nil, errors.Wrap(err, "executor does not exist")
}
account, err := stateToAccountState(state)
if err != nil {
return hash.ZeroHash32B, nil, err
}
if e.Nonce() > account.Nonce {
account.Nonce = e.Nonce()
}
if err := ws.PutState(executorPKHash, state); err != nil {
return hash.ZeroHash32B, nil, errors.Wrap(err, "failed to update pending account changes to trie")
}
}
// Handle actions
receipts := make(map[hash.Hash32B]*action.Receipt)
for _, act := range actions {
for _, actionHandler := range ws.actionHandlers {
receipt, err := actionHandler.Handle(ctx, act, ws)
if err != nil {
return hash.ZeroHash32B, nil, errors.Wrapf(
err,
"error when action %x (nonce: %d) from %s mutates states",
act.Hash(),
act.Nonce(),
act.SrcAddr(),
)
}
if receipt != nil {
receipts[act.Hash()] = receipt
}
}
}
// Persist accountTrie's root hash
rootHash := ws.accountTrie.RootHash()
ws.cb.Put(trie.AccountKVNameSpace, []byte(AccountTrieRootKey), rootHash[:], "failed to store accountTrie's root hash")
// Persist new list of Candidates
candidates, err := MapToCandidates(ws.cachedCandidates)
if err != nil {
return hash.ZeroHash32B, nil, errors.Wrap(err, "failed to convert map of cached Candidates to candidate list")
}
sort.Sort(candidates)
candidatesBytes, err := candidates.Serialize()
if err != nil {
return hash.ZeroHash32B, nil, errors.Wrap(err, "failed to serialize Candidates")
}
h := byteutil.Uint64ToBytes(blockHeight)
ws.cb.Put(trie.CandidateKVNameSpace, h, candidatesBytes, "failed to store Candidates on Height %d", blockHeight)
// Persist current chain Height
ws.cb.Put(trie.AccountKVNameSpace, []byte(CurrentHeightKey), h, "failed to store accountTrie's current Height")
return ws.RootHash(), receipts, nil
}
// Commit persists all changes in RunActions() into the DB
func (ws *workingSet) Commit() error {
// Commit all changes in a batch
if err := ws.dao.Commit(ws.cb); err != nil {
return errors.Wrap(err, "failed to Commit all changes to underlying DB in a batch")
}
ws.clearCache()
return nil
}
// UpdateCachedStates updates cached states
func (ws *workingSet) UpdateCachedStates(pkHash hash.PKHash, account *Account) {
ws.cachedStates[pkHash] = account
}
//======================================
// Contract functions
//======================================
// GetCodeHash returns contract's code hash
func (ws *workingSet) GetCodeHash(addr hash.PKHash) (hash.Hash32B, error) {
if contract, ok := ws.cachedContract[addr]; ok {
return byteutil.BytesTo32B(contract.SelfState().CodeHash), nil
}
state, err := ws.CachedState(addr, &Account{})
if err != nil {
return hash.ZeroHash32B, errors.Wrapf(err, "failed to GetCodeHash for contract %x", addr)
}
account, err := stateToAccountState(state)
if err != nil {
return hash.ZeroHash32B, err
}
return byteutil.BytesTo32B(account.CodeHash), nil
}
// GetCode returns contract's code
func (ws *workingSet) GetCode(addr hash.PKHash) ([]byte, error) {
if contract, ok := ws.cachedContract[addr]; ok {
return contract.GetCode()
}
state, err := ws.CachedState(addr, &Account{})
if err != nil {
return nil, errors.Wrapf(err, "failed to GetCode for contract %x", addr)
}
account, err := stateToAccountState(state)
if err != nil {
return nil, err
}
return ws.dao.Get(trie.CodeKVNameSpace, account.CodeHash[:])
}
// SetCode sets contract's code
func (ws *workingSet) SetCode(addr hash.PKHash, code []byte) error {
if contract, ok := ws.cachedContract[addr]; ok {
contract.SetCode(byteutil.BytesTo32B(hash.Hash256b(code)), code)
return nil
}
contract, err := ws.getContract(addr)
if err != nil {
return errors.Wrapf(err, "failed to SetCode for contract %x", addr)
}
contract.SetCode(byteutil.BytesTo32B(hash.Hash256b(code)), code)
return nil
}
// GetContractState returns contract's storage value
func (ws *workingSet) GetContractState(addr hash.PKHash, key hash.Hash32B) (hash.Hash32B, error) {
if contract, ok := ws.cachedContract[addr]; ok {
v, err := contract.GetState(key)
return byteutil.BytesTo32B(v), err
}
contract, err := ws.getContract(addr)
if err != nil {
return hash.ZeroHash32B, errors.Wrapf(err, "failed to GetContractState for contract %x", addr)
}
v, err := contract.GetState(key)
return byteutil.BytesTo32B(v), err
}
// SetContractState writes contract's storage value
func (ws *workingSet) SetContractState(addr hash.PKHash, key, value hash.Hash32B) error {
if contract, ok := ws.cachedContract[addr]; ok {
return contract.SetState(key, value[:])
}
contract, err := ws.getContract(addr)
if err != nil {
return errors.Wrapf(err, "failed to SetContractState for contract %x", addr)
}
return contract.SetState(key, value[:])
}
//======================================
// private account/contract functions
//======================================
// State pulls a state from the DB
func (ws *workingSet) State(hash hash.PKHash, s State) (State, error) {
mstate, err := ws.accountTrie.Get(hash[:])
if errors.Cause(err) == trie.ErrNotExist {
return nil, errors.Wrapf(ErrStateNotExist, "addrHash = %x", hash[:])
}
if err != nil {
return nil, errors.Wrapf(err, "failed to get account of %x", hash)
}
if err := s.Deserialize(mstate); err != nil {
return nil, err
}
return s, nil
}
// accountState returns the confirmed account state on the chain
func (ws *workingSet) accountState(addr string) (*Account, error) {
addrHash, err := iotxaddress.AddressToPKHash(addr)
if err != nil {
return nil, err
}
state, err := ws.State(addrHash, &Account{})
if err != nil {
return nil, err
}
account, err := stateToAccountState(state)
if err != nil {
return nil, err
}
return account, nil
}
// CachedState pulls a state from the cache first. If missing, it will hit the DB
func (ws *workingSet) CachedState(hash hash.PKHash, s State) (State, error) {
if state, ok := ws.cachedStates[hash]; ok {
return state, nil
}
// add to local cache
state, err := ws.State(hash, s)
if state != nil {
ws.cachedStates[hash] = state
}
return state, err
}
// PutState puts a state into the DB
func (ws *workingSet) PutState(pkHash hash.PKHash, state State) error {
ss, err := state.Serialize()
if err != nil {
return errors.Wrapf(err, "failed to convert account %v to bytes", state)
}
return ws.accountTrie.Upsert(pkHash[:], ss)
}
func (ws *workingSet) getContract(addr hash.PKHash) (Contract, error) {
state, err := ws.CachedState(addr, &Account{})
if err != nil {
return nil, errors.Wrapf(err, "failed to get the cached account of %x", addr)
}
account, err := stateToAccountState(state)
if err != nil {
return nil, err
}
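	// The account is tracked through cachedContract from here on, so drop the plain cached state entry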
delete(ws.cachedStates, addr)
if account.Root == hash.ZeroHash32B {
account.Root = trie.EmptyRoot
}
tr, err := trie.NewTrieSharedBatch(ws.dao, ws.cb, trie.ContractKVNameSpace, account.Root)
if err != nil {
return nil, errors.Wrapf(err, "failed to create storage trie for new contract %x", addr)
}
// add to contract cache
contract := newContract(account, tr)
ws.cachedContract[addr] = contract
return contract, nil
}
// clearCache removes all local changes after committing to trie
func (ws *workingSet) clearCache() {
ws.cachedStates = nil
ws.cachedContract = nil
ws.cachedCandidates = nil
ws.cachedStates = make(map[hash.PKHash]State)
ws.cachedContract = make(map[hash.PKHash]Contract)
ws.cachedCandidates = make(map[hash.PKHash]*Candidate)
}
//======================================
// private candidate functions
//======================================
func (ws *workingSet) updateCandidate(pkHash hash.PKHash, totalWeight *big.Int, blockHeight uint64) {
// Candidate was added when self-nomination, always exist in cachedCandidates
candidate := ws.cachedCandidates[pkHash]
candidate.Votes = totalWeight
candidate.LastUpdateHeight = blockHeight
}
func (ws *workingSet) getCandidates(height uint64) (CandidateList, error) {
candidatesBytes, err := ws.dao.Get(trie.CandidateKVNameSpace, byteutil.Uint64ToBytes(height))
if err != nil {
return nil, errors.Wrapf(err, "failed to get Candidates on Height %d", height)
}
var candidates CandidateList
if err := candidates.Deserialize(candidatesBytes); err != nil {
return nil, err
}
return candidates, nil
}
//======================================
// private transfer/vote functions
//======================================
func (ws *workingSet) handleTsf(producer *Account, tsfs []*action.Transfer, gasLimit *uint64, enableGasCharge bool) error {
for _, tx := range tsfs {
if tx.IsContract() {
continue
}
if !tx.IsCoinbase() {
// check sender
sender, err := ws.LoadOrCreateAccountState(tx.Sender(), big.NewInt(0))
if err != nil {
return errors.Wrapf(err, "failed to load or create the account of sender %s", tx.Sender())
}
if enableGasCharge {
gas, err := tx.IntrinsicGas()
if err != nil {
return errors.Wrapf(err, "failed to get intrinsic gas for transfer hash %s", tx.Hash())
}
if *gasLimit < gas {
return vm.ErrOutOfGas
}
gasFee := big.NewInt(0).Mul(tx.GasPrice(), big.NewInt(0).SetUint64(gas))
if big.NewInt(0).Add(tx.Amount(), gasFee).Cmp(sender.Balance) == 1 {
return errors.Wrapf(ErrNotEnoughBalance, "failed to verify the Balance of sender %s", tx.Sender())
}
// charge sender gas
if err := sender.SubBalance(gasFee); err != nil {
return errors.Wrapf(err, "failed to charge the gas for sender %s", tx.Sender())
}
// compensate block producer gas
if err := producer.AddBalance(gasFee); err != nil {
return errors.Wrapf(err, "failed to compensate gas to producer")
}
*gasLimit -= gas
}
// update sender Balance
if err := sender.SubBalance(tx.Amount()); err != nil {
return errors.Wrapf(err, "failed to update the Balance of sender %s", tx.Sender())
}
// update sender Nonce
if tx.Nonce() > sender.Nonce {
sender.Nonce = tx.Nonce()
}
// Update sender votes
if len(sender.Votee) > 0 && sender.Votee != tx.Sender() {
// sender already voted to a different person
voteeOfSender, err := ws.LoadOrCreateAccountState(sender.Votee, big.NewInt(0))
if err != nil {
return errors.Wrapf(err, "failed to load or create the account of sender's votee %s", sender.Votee)
}
voteeOfSender.VotingWeight.Sub(voteeOfSender.VotingWeight, tx.Amount())
}
}
// check recipient
recipient, err := ws.LoadOrCreateAccountState(tx.Recipient(), big.NewInt(0))
if err != nil {
return errors.Wrapf(err, "failed to laod or create the account of recipient %s", tx.Recipient())
}
if err := recipient.AddBalance(tx.Amount()); err != nil {
return errors.Wrapf(err, "failed to update the Balance of recipient %s", tx.Recipient())
}
// Update recipient votes
if len(recipient.Votee) > 0 && recipient.Votee != tx.Recipient() {
// recipient already voted to a different person
voteeOfRecipient, err := ws.LoadOrCreateAccountState(recipient.Votee, big.NewInt(0))
if err != nil {
return errors.Wrapf(err, "failed to load or create the account of recipient's votee %s", recipient.Votee)
}
voteeOfRecipient.VotingWeight.Add(voteeOfRecipient.VotingWeight, tx.Amount())
}
}
return nil
}
func (ws *workingSet) handleVote(producer *Account, blockHeight uint64, votes []*action.Vote, gasLimit *uint64, enableGasCharge bool) error {
for _, v := range votes {
voteFrom, err := ws.LoadOrCreateAccountState(v.Voter(), big.NewInt(0))
if err != nil {
return errors.Wrapf(err, "failed to load or create the account of voter %s", v.Voter())
}
voterPKHash, err := iotxaddress.AddressToPKHash(v.Voter())
if err != nil {
return err
}
if enableGasCharge {
gas, err := v.IntrinsicGas()
if err != nil {
return errors.Wrapf(err, "failed to get intrinsic gas for vote hash %s", v.Hash())
}
if *gasLimit < gas {
return vm.ErrOutOfGas
}
gasFee := big.NewInt(0).Mul(v.GasPrice(), big.NewInt(0).SetUint64(gas))
if gasFee.Cmp(voteFrom.Balance) == 1 {
return errors.Wrapf(ErrNotEnoughBalance, "failed to verify the Balance for gas of voter %s, %d, %d", v.Voter(), gas, voteFrom.Balance)
}
// charge voter Gas
if err := voteFrom.SubBalance(gasFee); err != nil {
return errors.Wrapf(err, "failed to charge the gas for voter %s", v.Voter())
}
// compensate block producer gas
if err := producer.AddBalance(gasFee); err != nil {
return errors.Wrapf(err, "failed to compensate gas to producer")
}
*gasLimit -= gas
}
// update voteFrom Nonce
if v.Nonce() > voteFrom.Nonce {
voteFrom.Nonce = v.Nonce()
}
// Update old votee's weight
if len(voteFrom.Votee) > 0 && voteFrom.Votee != v.Voter() {
// voter already voted
oldVotee, err := ws.LoadOrCreateAccountState(voteFrom.Votee, big.NewInt(0))
if err != nil {
return errors.Wrapf(err, "failed to load or create the account of voter's old votee %s", voteFrom.Votee)
}
oldVotee.VotingWeight.Sub(oldVotee.VotingWeight, voteFrom.Balance)
voteFrom.Votee = ""
}
if v.Votee() == "" {
// unvote operation
voteFrom.IsCandidate = false
continue
}
voteTo, err := ws.LoadOrCreateAccountState(v.Votee(), big.NewInt(0))
if err != nil {
return errors.Wrapf(err, "failed to load or create the account of votee %s", v.Votee())
}
if v.Voter() != v.Votee() {
// Voter votes to a different person
voteTo.VotingWeight.Add(voteTo.VotingWeight, voteFrom.Balance)
voteFrom.Votee = v.Votee()
} else {
// Vote to self: self-nomination or cancel the previous vote case
voteFrom.Votee = v.Voter()
voteFrom.IsCandidate = true
votePubkey := v.VoterPublicKey()
if _, ok := ws.cachedCandidates[voterPKHash]; !ok {
ws.cachedCandidates[voterPKHash] = &Candidate{
Address: v.Voter(),
PublicKey: votePubkey,
CreationHeight: blockHeight,
}
}
}
}
return nil
}
func stateToAccountState(state State) (*Account, error) {
account, ok := state.(*Account)
if !ok {
return nil, fmt.Errorf("error when casting %T state into account state", state)
}
return account, nil
}
| 1 | 13,304 | safer to make a copy of incoming *big.Int | iotexproject-iotex-core | go |
@@ -19,6 +19,8 @@ import "time"
const (
// ListContainersTimeout is the timeout for the ListContainers API.
ListContainersTimeout = 10 * time.Minute
+ // ListImagesTimeout is the timeout for the ListImages API
+ ListImagesTimeout = 10 * time.Minute
// LoadImageTimeout is the timeout for the LoadImage API. It's set
// to much lower value than pullImageTimeout as it involves loading
// image from either a file or STDIN | 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package dockerclient
import "time"
// Timelimits for docker operations enforced above docker
const (
// ListContainersTimeout is the timeout for the ListContainers API.
ListContainersTimeout = 10 * time.Minute
// LoadImageTimeout is the timeout for the LoadImage API. It's set
// to much lower value than pullImageTimeout as it involves loading
// image from either a file or STDIN
// calls involved.
// TODO: Benchmark and re-evaluate this value
LoadImageTimeout = 10 * time.Minute
// CreateContainerTimeout is the timeout for the CreateContainer API.
CreateContainerTimeout = 4 * time.Minute
// StopContainerTimeout is the timeout for the StopContainer API.
StopContainerTimeout = 30 * time.Second
// RemoveContainerTimeout is the timeout for the RemoveContainer API.
RemoveContainerTimeout = 5 * time.Minute
// InspectContainerTimeout is the timeout for the InspectContainer API.
InspectContainerTimeout = 30 * time.Second
// RemoveImageTimeout is the timeout for the RemoveImage API.
RemoveImageTimeout = 3 * time.Minute
// VersionTimeout is the timeout for the Version API
VersionTimeout = 10 * time.Second
)
| 1 | 21,614 | how did we choose this value? does this hold good for Windows too? | aws-amazon-ecs-agent | go |
@@ -0,0 +1,17 @@
+// +build nocriu
+
+package libcontainer
+
+import (
+ "errors"
+)
+
+var errNotEnabled = errors.New("Checkpoint/restore not supported")
+
+func (c *linuxContainer) Checkpoint(opts *CheckpointOpts) error {
+ return errNotEnabled
+}
+
+func (c *linuxContainer) Restore(process *Process, opts *CheckpointOpts) error {
+ return errNotEnabled
+} | 1 | 1 | 10,492 | To follow the other build tags, maybe we can call it `criu`? | opencontainers-runc | go |
|
@@ -20,9 +20,10 @@ module Bolt
log_destination: STDERR
}.freeze
- TRANSPORT_OPTIONS = %i[insecure password run_as sudo sudo_password key tty user].freeze
+ TRANSPORT_OPTIONS = %i[insecure password run_as sudo sudo_password key tty user connect_timeout].freeze
TRANSPORT_DEFAULTS = {
+ connect_timeout: 10,
insecure: false,
tty: false
}.freeze | 1 | require 'logger'
require 'yaml'
module Bolt
Config = Struct.new(
:concurrency,
:format,
:log_destination,
:log_level,
:modulepath,
:transport,
:transports
) do
DEFAULTS = {
concurrency: 100,
transport: 'ssh',
format: 'human',
log_level: Logger::WARN,
log_destination: STDERR
}.freeze
TRANSPORT_OPTIONS = %i[insecure password run_as sudo sudo_password key tty user].freeze
TRANSPORT_DEFAULTS = {
insecure: false,
tty: false
}.freeze
TRANSPORTS = %i[ssh winrm pcp].freeze
def initialize(**kwargs)
super()
DEFAULTS.merge(kwargs).each { |k, v| self[k] = v }
self[:transports] ||= {}
TRANSPORTS.each do |transport|
unless self[:transports][transport]
self[:transports][transport] = {}
end
TRANSPORT_DEFAULTS.each do |k, v|
unless self[:transports][transport][k]
self[:transports][transport][k] = v
end
end
end
end
def default_path
path = ['.puppetlabs', 'bolt.yml']
root_path = '~'
File.join(root_path, *path)
end
def read_config_file(path)
path_passed = path
path ||= default_path
path = File.expand_path(path)
# safe_load doesn't work with psych in ruby 2.0
# The user controls the configfile so this isn't a problem
# rubocop:disable YAMLLoad
File.open(path, "r:UTF-8") { |f| YAML.load(f.read) }
rescue Errno::ENOENT
if path_passed
raise Bolt::CLIError, "Could not read config file: #{path}"
end
# In older releases of psych SyntaxError is not a subclass of Exception
rescue Psych::SyntaxError
raise Bolt::CLIError, "Could not parse config file: #{path}"
rescue Psych::Exception
raise Bolt::CLIError, "Could not parse config file: #{path}"
rescue IOError, SystemCallError
raise Bolt::CLIError, "Could not read config file: #{path}"
end
def update_from_file(data)
if data['modulepath']
self[:modulepath] = data['modulepath'].split(File::PATH_SEPARATOR)
end
if data['concurrency']
self[:concurrency] = data['concurrency']
end
if data['format']
self[:format] = data['format'] if data['format']
end
if data['ssh']
if data['ssh']['private-key']
self[:transports][:ssh][:key] = data['ssh']['private-key']
end
if data['ssh']['insecure']
self[:transports][:ssh][:insecure] = data['ssh']['insecure']
end
end
# if data['pcp']
# end
# if data['winrm']
# end
end
def load_file(path)
data = read_config_file(path)
update_from_file(data) if data
end
def update_from_cli(options)
%i[concurrency transport format modulepath].each do |key|
self[key] = options[key] if options[key]
end
if options[:debug]
self[:log_level] = Logger::DEBUG
elsif options[:verbose]
self[:log_level] = Logger::INFO
end
TRANSPORT_OPTIONS.each do |key|
# TODO: We should eventually make these transport specific
TRANSPORTS.each do |transport|
self[:transports][transport][key] = options[key] if options[key]
end
end
end
def validate
TRANSPORTS.each do |transport|
tconf = self[:transports][transport]
if tconf[:sudo] && tconf[:sudo] != 'sudo'
raise Bolt::CLIError, "Only 'sudo' is supported for privilege escalation."
end
end
unless %w[human json].include? self[:format]
raise Bolt::CLIError, "Unsupported format: '#{self[:format]}'"
end
end
end
end
| 1 | 6,938 | Should we load this from the config file too? | puppetlabs-bolt | rb |
@@ -128,6 +128,12 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
}
nextRecover := now.Add(*duration)
+ // TODO(at15): this should be validated in webhook instead of throwing error at runtime.
+ // User can give a cron with interval shorter than duration.
+ // Example:
+ // duration: "10s"
+ // scheduler:
+ // cron: "@every 5s
if nextStart.Before(nextRecover) {
err := fmt.Errorf("nextRecover shouldn't be later than nextStart")
r.Log.Error(err, "nextRecover is later than nextStart. Then recover can never be reached", | 1 | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package twophase
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
"github.com/pingcap/chaos-mesh/api/v1alpha1"
"github.com/pingcap/chaos-mesh/controllers/reconciler"
"github.com/pingcap/chaos-mesh/pkg/utils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// InnerSchedulerObject is the Object for the twophase reconcile
type InnerSchedulerObject interface {
reconciler.InnerObject
GetDuration() (*time.Duration, error)
GetNextStart() time.Time
SetNextStart(time.Time)
GetNextRecover() time.Time
SetNextRecover(time.Time)
GetScheduler() *v1alpha1.SchedulerSpec
}
// Reconciler for the twophase reconciler
type Reconciler struct {
reconciler.InnerReconciler
client.Client
Log logr.Logger
}
// NewReconciler would create reconciler for twophase controller
func NewReconciler(r reconciler.InnerReconciler, client client.Client, log logr.Logger) *Reconciler {
return &Reconciler{
InnerReconciler: r,
Client: client,
Log: log,
}
}
func (r *Reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
var err error
now := time.Now()
r.Log.Info("Reconciling a two phase chaos", "name", req.Name, "namespace", req.Namespace)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_chaos := r.Object()
if err = r.Get(ctx, req.NamespacedName, _chaos); err != nil {
r.Log.Error(err, "unable to get chaos")
return ctrl.Result{}, err
}
chaos := _chaos.(InnerSchedulerObject)
duration, err := chaos.GetDuration()
if err != nil {
r.Log.Error(err, "failed to get chaos duration")
return ctrl.Result{}, err
}
scheduler := chaos.GetScheduler()
if scheduler == nil {
r.Log.Info("Scheduler should be defined currently")
return ctrl.Result{}, fmt.Errorf("misdefined scheduler")
}
if duration == nil {
zero := 0 * time.Second
duration = &zero
}
status := chaos.GetStatus()
if chaos.IsDeleted() {
// This chaos was deleted
r.Log.Info("Removing self")
err = r.Recover(ctx, req, chaos)
if err != nil {
r.Log.Error(err, "failed to recover chaos")
return ctrl.Result{Requeue: true}, err
}
status.Experiment.Phase = v1alpha1.ExperimentPhaseFinished
} else if !chaos.GetNextRecover().IsZero() && chaos.GetNextRecover().Before(now) {
// Start recover
r.Log.Info("Recovering")
err = r.Recover(ctx, req, chaos)
if err != nil {
r.Log.Error(err, "failed to recover chaos")
return ctrl.Result{Requeue: true}, err
}
chaos.SetNextRecover(time.Time{})
status.Experiment.EndTime = &metav1.Time{
Time: time.Now(),
}
status.Experiment.Phase = v1alpha1.ExperimentPhaseFinished
} else if chaos.GetNextStart().Before(now) {
nextStart, err := utils.NextTime(*chaos.GetScheduler(), now)
if err != nil {
r.Log.Error(err, "failed to get next start time")
return ctrl.Result{}, err
}
nextRecover := now.Add(*duration)
if nextStart.Before(nextRecover) {
err := fmt.Errorf("nextRecover shouldn't be later than nextStart")
r.Log.Error(err, "nextRecover is later than nextStart. Then recover can never be reached",
"nextRecover", nextRecover, "nextStart", nextStart)
return ctrl.Result{}, err
}
r.Log.Info("Chaos action:", "chaos", chaos)
// Start to apply action
r.Log.Info("Performing Action")
err = r.Apply(ctx, req, chaos)
if err != nil {
r.Log.Error(err, "failed to apply chaos action")
status.Experiment.Phase = v1alpha1.ExperimentPhaseFailed
updateError := retry.RetryOnConflict(retry.DefaultRetry, func() error {
return r.Update(ctx, chaos)
})
if updateError != nil {
r.Log.Error(updateError, "unable to update chaos finalizers")
}
return ctrl.Result{Requeue: true}, err
}
status.Experiment.StartTime = &metav1.Time{
Time: time.Now(),
}
status.Experiment.Phase = v1alpha1.ExperimentPhaseRunning
chaos.SetNextStart(*nextStart)
chaos.SetNextRecover(nextRecover)
} else {
nextTime := chaos.GetNextStart()
if !chaos.GetNextRecover().IsZero() && chaos.GetNextRecover().Before(nextTime) {
nextTime = chaos.GetNextRecover()
}
duration := nextTime.Sub(now)
r.Log.Info("Requeue request", "after", duration)
return ctrl.Result{RequeueAfter: duration}, nil
}
if err := r.Update(ctx, chaos); err != nil {
r.Log.Error(err, "unable to update chaosctl status")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
| 1 | 14,268 | Can you file an issue for this? | chaos-mesh-chaos-mesh | go |
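The TODO in this patch argues that the duration-versus-cron mismatch belongs in a validating webhook rather than in the reconcile loop. A rough sketch of that check, assuming robfig/cron for parsing (it accepts the `@every` descriptor used in the TODO's example); the function name and wiring are placeholders, not chaos-mesh's actual webhook code:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

// validateSchedule rejects specs whose chaos duration is not shorter than the
// gap between two consecutive cron activations, which is the situation the
// reconciler above can only report as a runtime error.
func validateSchedule(cronSpec string, duration time.Duration) error {
	sched, err := cron.ParseStandard(cronSpec)
	if err != nil {
		return fmt.Errorf("invalid cron %q: %w", cronSpec, err)
	}

	// Approximate the activation interval from two consecutive firings.
	first := sched.Next(time.Now())
	second := sched.Next(first)
	if interval := second.Sub(first); duration >= interval {
		return fmt.Errorf("duration %s must be shorter than the cron interval %s", duration, interval)
	}
	return nil
}

func main() {
	// Mirrors the TODO's example: duration 10s with "@every 5s" should be rejected.
	fmt.Println(validateSchedule("@every 5s", 10*time.Second))  // error
	fmt.Println(validateSchedule("@every 30s", 10*time.Second)) // <nil>
}
```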
@@ -122,7 +122,7 @@ class Connection extends EventEmitter {
if (issue.isTimeout) {
op.cb(
new MongoNetworkTimeoutError(`connection ${this.id} to ${this.address} timed out`, {
- beforeHandshake: !!this[kIsMaster]
+ beforeHandshake: this[kIsMaster] == null
})
);
} else if (issue.isClose) { | 1 | 'use strict';
const EventEmitter = require('events');
const MessageStream = require('./message_stream');
const MongoError = require('../core/error').MongoError;
const MongoNetworkError = require('../core/error').MongoNetworkError;
const MongoNetworkTimeoutError = require('../core/error').MongoNetworkTimeoutError;
const MongoWriteConcernError = require('../core/error').MongoWriteConcernError;
const CommandResult = require('../core/connection/command_result');
const StreamDescription = require('./stream_description').StreamDescription;
const wp = require('../core/wireprotocol');
const apm = require('../core/connection/apm');
const updateSessionFromResponse = require('../core/sessions').updateSessionFromResponse;
const uuidV4 = require('../core/utils').uuidV4;
const now = require('../utils').now;
const calculateDurationInMs = require('../utils').calculateDurationInMs;
const kStream = Symbol('stream');
const kQueue = Symbol('queue');
const kMessageStream = Symbol('messageStream');
const kGeneration = Symbol('generation');
const kLastUseTime = Symbol('lastUseTime');
const kClusterTime = Symbol('clusterTime');
const kDescription = Symbol('description');
const kIsMaster = Symbol('ismaster');
const kAutoEncrypter = Symbol('autoEncrypter');
class Connection extends EventEmitter {
constructor(stream, options) {
super(options);
this.id = options.id;
this.address = streamIdentifier(stream);
this.bson = options.bson;
this.socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 0;
this.host = options.host || 'localhost';
this.port = options.port || 27017;
this.monitorCommands =
typeof options.monitorCommands === 'boolean' ? options.monitorCommands : false;
this.closed = false;
this.destroyed = false;
this[kDescription] = new StreamDescription(this.address, options);
this[kGeneration] = options.generation;
this[kLastUseTime] = now();
// retain a reference to an `AutoEncrypter` if present
if (options.autoEncrypter) {
this[kAutoEncrypter] = options.autoEncrypter;
}
// setup parser stream and message handling
this[kQueue] = new Map();
this[kMessageStream] = new MessageStream(options);
this[kMessageStream].on('message', messageHandler(this));
this[kStream] = stream;
stream.on('error', () => {
/* ignore errors, listen to `close` instead */
});
this[kMessageStream].on('error', error => this.handleIssue({ destroy: error }));
stream.on('close', () => this.handleIssue({ isClose: true }));
stream.on('timeout', () => this.handleIssue({ isTimeout: true, destroy: true }));
// hook the message stream up to the passed in stream
stream.pipe(this[kMessageStream]);
this[kMessageStream].pipe(stream);
}
get description() {
return this[kDescription];
}
get ismaster() {
return this[kIsMaster];
}
// the `connect` method stores the result of the handshake ismaster on the connection
set ismaster(response) {
this[kDescription].receiveResponse(response);
// TODO: remove this, and only use the `StreamDescription` in the future
this[kIsMaster] = response;
}
get generation() {
return this[kGeneration] || 0;
}
get idleTime() {
return calculateDurationInMs(this[kLastUseTime]);
}
get clusterTime() {
return this[kClusterTime];
}
get stream() {
return this[kStream];
}
markAvailable() {
this[kLastUseTime] = now();
}
/**
* @param {{ isTimeout?: boolean; isClose?: boolean; destroy?: boolean | Error }} issue
*/
handleIssue(issue) {
if (this.closed) {
return;
}
if (issue.destroy) {
this[kStream].destroy(typeof issue.destroy === 'boolean' ? undefined : issue.destroy);
}
this.closed = true;
for (const idAndOp of this[kQueue]) {
const op = idAndOp[1];
if (issue.isTimeout) {
op.cb(
new MongoNetworkTimeoutError(`connection ${this.id} to ${this.address} timed out`, {
beforeHandshake: !!this[kIsMaster]
})
);
} else if (issue.isClose) {
op.cb(new MongoNetworkError(`connection ${this.id} to ${this.address} closed`));
} else {
op.cb(typeof issue.destroy === 'boolean' ? undefined : issue.destroy);
}
}
this[kQueue].clear();
this.emit('close');
}
destroy(options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options = Object.assign({ force: false }, options);
if (this[kStream] == null || this.destroyed) {
this.destroyed = true;
if (typeof callback === 'function') {
callback();
}
return;
}
if (options.force) {
this[kStream].destroy();
this.destroyed = true;
if (typeof callback === 'function') {
callback();
}
return;
}
this[kStream].end(err => {
this.destroyed = true;
if (typeof callback === 'function') {
callback(err);
}
});
}
// Wire protocol methods
command(ns, cmd, options, callback) {
wp.command(makeServerTrampoline(this), ns, cmd, options, callback);
}
query(ns, cmd, cursorState, options, callback) {
wp.query(makeServerTrampoline(this), ns, cmd, cursorState, options, callback);
}
getMore(ns, cursorState, batchSize, options, callback) {
wp.getMore(makeServerTrampoline(this), ns, cursorState, batchSize, options, callback);
}
killCursors(ns, cursorState, callback) {
wp.killCursors(makeServerTrampoline(this), ns, cursorState, callback);
}
insert(ns, ops, options, callback) {
wp.insert(makeServerTrampoline(this), ns, ops, options, callback);
}
update(ns, ops, options, callback) {
wp.update(makeServerTrampoline(this), ns, ops, options, callback);
}
remove(ns, ops, options, callback) {
wp.remove(makeServerTrampoline(this), ns, ops, options, callback);
}
}
/// This lets us emulate a legacy `Server` instance so we can work with the existing wire
/// protocol methods. Eventually, the operation executor will return a `Connection` to execute
/// against.
function makeServerTrampoline(connection) {
const server = {
description: connection.description,
clusterTime: connection[kClusterTime],
s: {
bson: connection.bson,
pool: { write: write.bind(connection), isConnected: () => true }
}
};
if (connection[kAutoEncrypter]) {
server.autoEncrypter = connection[kAutoEncrypter];
}
return server;
}
function messageHandler(conn) {
return function messageHandler(message) {
// always emit the message, in case we are streaming
conn.emit('message', message);
if (!conn[kQueue].has(message.responseTo)) {
return;
}
const operationDescription = conn[kQueue].get(message.responseTo);
const callback = operationDescription.cb;
// SERVER-45775: For exhaust responses we should be able to use the same requestId to
// track response, however the server currently synthetically produces remote requests
// making the `responseTo` change on each response
conn[kQueue].delete(message.responseTo);
if (message.moreToCome) {
// requeue the callback for next synthetic request
conn[kQueue].set(message.requestId, operationDescription);
} else if (operationDescription.socketTimeoutOverride) {
conn[kStream].setTimeout(conn.socketTimeout);
}
try {
// Pass in the entire description because it has BSON parsing options
message.parse(operationDescription);
} catch (err) {
callback(new MongoError(err));
return;
}
if (message.documents[0]) {
const document = message.documents[0];
const session = operationDescription.session;
if (session) {
updateSessionFromResponse(session, document);
}
if (document.$clusterTime) {
conn[kClusterTime] = document.$clusterTime;
conn.emit('clusterTimeReceived', document.$clusterTime);
}
if (operationDescription.command) {
if (document.writeConcernError) {
callback(new MongoWriteConcernError(document.writeConcernError, document));
return;
}
if (document.ok === 0 || document.$err || document.errmsg || document.code) {
callback(new MongoError(document));
return;
}
}
}
// NODE-2382: reenable in our glorious non-leaky abstraction future
// callback(null, operationDescription.fullResult ? message : message.documents[0]);
callback(
undefined,
new CommandResult(
operationDescription.fullResult ? message : message.documents[0],
conn,
message
)
);
};
}
function streamIdentifier(stream) {
if (typeof stream.address === 'function') {
return `${stream.remoteAddress}:${stream.remotePort}`;
}
return uuidV4().toString('hex');
}
// Not meant to be called directly, the wire protocol methods call this assuming it is a `Pool` instance
function write(command, options, callback) {
if (typeof options === 'function') {
callback = options;
}
options = options || {};
const operationDescription = {
requestId: command.requestId,
cb: callback,
session: options.session,
fullResult: typeof options.fullResult === 'boolean' ? options.fullResult : false,
noResponse: typeof options.noResponse === 'boolean' ? options.noResponse : false,
documentsReturnedIn: options.documentsReturnedIn,
command: !!options.command,
// for BSON parsing
promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true,
promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true,
promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false,
raw: typeof options.raw === 'boolean' ? options.raw : false
};
if (this[kDescription] && this[kDescription].compressor) {
operationDescription.agreedCompressor = this[kDescription].compressor;
if (this[kDescription].zlibCompressionLevel) {
operationDescription.zlibCompressionLevel = this[kDescription].zlibCompressionLevel;
}
}
if (typeof options.socketTimeout === 'number') {
operationDescription.socketTimeoutOverride = true;
this[kStream].setTimeout(options.socketTimeout);
}
// if command monitoring is enabled we need to modify the callback here
if (this.monitorCommands) {
this.emit('commandStarted', new apm.CommandStartedEvent(this, command));
operationDescription.started = now();
operationDescription.cb = (err, reply) => {
if (err) {
this.emit(
'commandFailed',
new apm.CommandFailedEvent(this, command, err, operationDescription.started)
);
} else {
if (reply && reply.result && (reply.result.ok === 0 || reply.result.$err)) {
this.emit(
'commandFailed',
new apm.CommandFailedEvent(this, command, reply.result, operationDescription.started)
);
} else {
this.emit(
'commandSucceeded',
new apm.CommandSucceededEvent(this, command, reply, operationDescription.started)
);
}
}
if (typeof callback === 'function') {
callback(err, reply);
}
};
}
if (!operationDescription.noResponse) {
this[kQueue].set(operationDescription.requestId, operationDescription);
}
try {
this[kMessageStream].writeCommand(command, operationDescription);
} catch (e) {
if (!operationDescription.noResponse) {
this[kQueue].delete(operationDescription.requestId);
operationDescription.cb(e);
return;
}
}
if (operationDescription.noResponse) {
operationDescription.cb();
}
}
module.exports = {
Connection
};
| 1 | 19,840 | @nbbeeken what were the cases where `!!this[kIsMaster]` was yielding an incorrect value? we should try to cover them in the tests | mongodb-node-mongodb-native | js |
@@ -51,6 +51,19 @@ module Bolt
end
end
+ def setup_inventory(inventory)
+ config = Bolt::Config.default
+ config.overwrite_transport_data(inventory['config']['transport'],
+ Bolt::Util.symbolize_top_level_keys(inventory['config']['transports']))
+
+ bolt_inventory = Bolt::Inventory.new(inventory['data'],
+ config,
+ Bolt::Util.symbolize_top_level_keys(inventory['target_hash']))
+
+ bolt_inventory.collect_groups
+ bolt_inventory
+ end
+
def compile_catalog(request)
pal_main = request['code_ast'] || request['code_string']
target = request['target'] | 1 | # frozen_string_literal: true
require 'bolt/pal'
require 'bolt/puppetdb'
Bolt::PAL.load_puppet
require 'bolt/catalog/compiler'
require 'bolt/catalog/logging'
module Bolt
class Catalog
def with_puppet_settings(hiera_config)
Dir.mktmpdir('bolt') do |dir|
cli = []
Puppet::Settings::REQUIRED_APP_SETTINGS.each do |setting|
cli << "--#{setting}" << dir
end
Puppet.settings.send(:clear_everything_for_tests)
Puppet.initialize_settings(cli)
Puppet.settings[:hiera_config] = hiera_config
# Use a special logdest that serializes all log messages and their level to stderr.
Puppet::Util::Log.newdestination(:stderr)
Puppet.settings[:log_level] = 'debug'
yield
end
end
def setup_node(node, trusted)
facts = Puppet.lookup(:pal_facts)
node_facts = Puppet::Node::Facts.new(Puppet[:node_name_value], facts)
node.fact_merge(node_facts)
node.parameters = node.parameters.merge(Puppet.lookup(:pal_variables))
node.trusted_data = trusted
end
def compile_node(node)
compiler = Puppet::Parser::BoltCompiler.new(node)
compiler.compile(&:to_resource)
end
def generate_ast(code)
with_puppet_settings do
Puppet::Pal.in_tmp_environment("bolt_parse") do |_pal|
node = Puppet.lookup(:pal_current_node)
compiler = Puppet::Parser::BoltCompiler.new(node)
compiler.dump_ast(compiler.parse_string(code))
end
end
end
def compile_catalog(request)
pal_main = request['code_ast'] || request['code_string']
target = request['target']
pdb_client = Bolt::PuppetDB::Client.new(Bolt::PuppetDB::Config.new(request['pdb_config']))
with_puppet_settings(request['hiera_config']) do
Puppet[:code] = ''
Puppet[:node_name_value] = target['name']
Puppet::Pal.in_tmp_environment(
'bolt_catalog',
modulepath: request["modulepath"] || [],
facts: target["facts"] || {},
variables: target["variables"] || {}
) do |_pal|
node = Puppet.lookup(:pal_current_node)
setup_node(node, target["trusted"])
Puppet.override(pal_main: pal_main, bolt_pdb_client: pdb_client) do
compile_node(node)
end
end
end
end
end
end
| 1 | 9,078 | This whole function feels messy. I don't have a better idea at the moment though. | puppetlabs-bolt | rb |
@@ -35,12 +35,9 @@ public class SetNetworkConnection extends WebDriverHandler<Number> implements Js
@SuppressWarnings("unchecked")
@Override
public void setJsonParameters(Map<String, Object> allParameters) throws Exception {
- Map<String, Map<String, Object>> parameters = (Map<String, Map<String, Object>>)allParameters.get("parameters");
- Map<String, Object> typeMap = parameters.get("type");
-
- type = new ConnectionType(Boolean.parseBoolean(typeMap.get("wifiEnabled").toString()),
- Boolean.parseBoolean(typeMap.get("dataEnabled").toString()),
- Boolean.parseBoolean(typeMap.get("airplaneMode").toString()));
+ Map<String, Object> parameters = (Map<String, Object>)allParameters.get("parameters");
+ Long bitmask = (Long) parameters.get("type");
+ type = new ConnectionType(bitmask.intValue());
}
@Override | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote.server.handler.mobile;
import java.util.Map;
import org.openqa.selenium.mobile.NetworkConnection.ConnectionType;
import org.openqa.selenium.remote.server.JsonParametersAware;
import org.openqa.selenium.remote.server.Session;
import org.openqa.selenium.remote.server.handler.WebDriverHandler;
import org.openqa.selenium.remote.server.handler.html5.Utils;
public class SetNetworkConnection extends WebDriverHandler<Number> implements JsonParametersAware {
private volatile ConnectionType type;
public SetNetworkConnection(Session session) {
super(session);
}
@SuppressWarnings("unchecked")
@Override
public void setJsonParameters(Map<String, Object> allParameters) throws Exception {
Map<String, Map<String, Object>> parameters = (Map<String, Map<String, Object>>)allParameters.get("parameters");
Map<String, Object> typeMap = parameters.get("type");
type = new ConnectionType(Boolean.parseBoolean(typeMap.get("wifiEnabled").toString()),
Boolean.parseBoolean(typeMap.get("dataEnabled").toString()),
Boolean.parseBoolean(typeMap.get("airplaneMode").toString()));
}
@Override
public Number call() throws Exception {
return Integer.parseInt(Utils.getNetworkConnection(getUnwrappedDriver()).setNetworkConnection(type).toString());
}
@Override
public String toString() {
return String.format("[set network connection : %s]", type.toString());
}
}
| 1 | 13,952 | should use Number instead of Long | SeleniumHQ-selenium | py |
@@ -0,0 +1,18 @@
+package mock
+
+import (
+ "context"
+
+ "github.com/influxdata/flux/dependencies/influxdb"
+)
+
+type MockProvider struct {
+ influxdb.UnimplementedProvider
+ WriterForFn func(ctx context.Context, conf influxdb.Config) (influxdb.Writer, error)
+}
+
+var _ influxdb.Provider = &MockProvider{}
+
+func (m MockProvider) WriterFor(ctx context.Context, conf influxdb.Config) (influxdb.Writer, error) {
+ return m.WriterForFn(ctx, conf)
+} | 1 | 1 | 16,106 | Can you rename this and the file `InfluxDBProvider` and `influxdb_provider.go` respectively? | influxdata-flux | go |
|
@@ -187,10 +187,10 @@ function innerDiffNode(dom, vchildren, context, mountAll, absorb) {
min = 0,
len = originalChildren.length,
childrenLen = 0,
- vlen = vchildren && vchildren.length,
+ vlen = vchildren ? vchildren.length : 0,
j, c, vchild, child;
- if (len) {
+ if (len!==0) {
for (let i=0; i<len; i++) {
let child = originalChildren[i],
props = child[ATTR_KEY], | 1 | import { ATTR_KEY } from '../constants';
import { isString, isFunction } from '../util';
import { isSameNodeType, isNamedNode } from './index';
import { isFunctionalComponent, buildFunctionalComponent } from './functional-component';
import { buildComponentFromVNode } from './component';
import { setAccessor, removeNode } from '../dom/index';
import { createNode, collectNode } from '../dom/recycler';
import { unmountComponent } from './component';
import options from '../options';
/** Queue of components that have been mounted and are awaiting componentDidMount */
export const mounts = [];
/** Diff recursion count, used to track the end of the diff cycle. */
export let diffLevel = 0;
/** Global flag indicating if the diff is currently within an SVG */
let isSvgMode = false;
/** Global flag indicating if the diff is performing hydration */
let hydrating = false;
/** Invoke queued componentDidMount lifecycle methods */
export function flushMounts() {
let c;
while ((c=mounts.pop())) {
if (options.afterMount) options.afterMount(c);
if (c.componentDidMount) c.componentDidMount();
}
}
/** Apply differences in a given vnode (and it's deep children) to a real DOM Node.
* @param {Element} [dom=null] A DOM node to mutate into the shape of the `vnode`
* @param {VNode} vnode A VNode (with descendants forming a tree) representing the desired DOM structure
* @returns {Element} dom The created/mutated element
* @private
*/
export function diff(dom, vnode, context, mountAll, parent, componentRoot) {
// diffLevel having been 0 here indicates initial entry into the diff (not a subdiff)
if (!diffLevel++) {
// when first starting the diff, check if we're diffing an SVG or within an SVG
isSvgMode = parent && typeof parent.ownerSVGElement!=='undefined';
// hydration is inidicated by the existing element to be diffed not having a prop cache
hydrating = dom && !(ATTR_KEY in dom);
}
let ret = idiff(dom, vnode, context, mountAll);
// append the element if its a new parent
if (parent && ret.parentNode!==parent) parent.appendChild(ret);
// diffLevel being reduced to 0 means we're exiting the diff
if (!--diffLevel) {
hydrating = false;
// invoke queued componentDidMount lifecycle methods
if (!componentRoot) flushMounts();
}
return ret;
}
function idiff(dom, vnode, context, mountAll) {
let ref = vnode && vnode.attributes && vnode.attributes.ref;
// Resolve ephemeral Pure Functional Components
while (isFunctionalComponent(vnode)) {
vnode = buildFunctionalComponent(vnode, context);
}
// empty values (null & undefined) render as empty Text nodes
if (vnode==null) vnode = '';
// Fast case: Strings create/update Text nodes.
if (isString(vnode)) {
// update if it's already a Text node
if (dom && dom instanceof Text && dom.parentNode) {
if (dom.nodeValue!=vnode) {
dom.nodeValue = vnode;
}
}
else {
// it wasn't a Text node: replace it with one and recycle the old Element
if (dom) recollectNodeTree(dom);
dom = document.createTextNode(vnode);
}
return dom;
}
// If the VNode represents a Component, perform a component diff.
if (isFunction(vnode.nodeName)) {
return buildComponentFromVNode(dom, vnode, context, mountAll);
}
let out = dom,
nodeName = String(vnode.nodeName), // @TODO this masks undefined component errors as `<undefined>`
prevSvgMode = isSvgMode,
vchildren = vnode.children;
// SVGs have special namespace stuff.
// This tracks entering and exiting that namespace when descending through the tree.
isSvgMode = nodeName==='svg' ? true : nodeName==='foreignObject' ? false : isSvgMode;
if (!dom) {
// case: we had no element to begin with
// - create an element with the nodeName from VNode
out = createNode(nodeName, isSvgMode);
}
else if (!isNamedNode(dom, nodeName)) {
// case: Element and VNode had different nodeNames
// - need to create the correct Element to match VNode
// - then migrate children from old to new
out = createNode(nodeName, isSvgMode);
// move children into the replacement node
while (dom.firstChild) out.appendChild(dom.firstChild);
// if the previous Element was mounted into the DOM, replace it inline
if (dom.parentNode) dom.parentNode.replaceChild(out, dom);
// recycle the old element (skips non-Element node types)
recollectNodeTree(dom);
}
let fc = out.firstChild,
props = out[ATTR_KEY];
// Attribute Hydration: if there is no prop cache on the element,
// ...create it and populate it with the element's attributes.
if (!props) {
out[ATTR_KEY] = props = {};
for (let a=out.attributes, i=a.length; i--; ) props[a[i].name] = a[i].value;
}
// Optimization: fast-path for elements containing a single TextNode:
if (!hydrating && vchildren && vchildren.length===1 && typeof vchildren[0]==='string' && fc && fc instanceof Text && !fc.nextSibling) {
if (fc.nodeValue!=vchildren[0]) {
fc.nodeValue = vchildren[0];
}
}
// otherwise, if there are existing or new children, diff them:
else if (vchildren && vchildren.length || fc) {
innerDiffNode(out, vchildren, context, mountAll, !!props.dangerouslySetInnerHTML);
}
// Apply attributes/props from VNode to the DOM Element:
diffAttributes(out, vnode.attributes, props);
// invoke original ref (from before resolving Pure Functional Components):
if (ref) {
(props.ref = ref)(out);
}
isSvgMode = prevSvgMode;
return out;
}
/** Apply child and attribute changes between a VNode and a DOM Node to the DOM.
* @param {Element} dom Element whose children should be compared & mutated
* @param {Array} vchildren Array of VNodes to compare to `dom.childNodes`
* @param {Object} context Implicitly descendant context object (from most recent `getChildContext()`)
* @param {Boolean} mountAll
* @param {Boolean} absorb If `true`, consumes externally created elements similar to hydration
*/
function innerDiffNode(dom, vchildren, context, mountAll, absorb) {
let originalChildren = dom.childNodes,
children = [],
keyed = {},
keyedLen = 0,
min = 0,
len = originalChildren.length,
childrenLen = 0,
vlen = vchildren && vchildren.length,
j, c, vchild, child;
if (len) {
for (let i=0; i<len; i++) {
let child = originalChildren[i],
props = child[ATTR_KEY],
key = vlen ? ((c = child._component) ? c.__key : props ? props.key : null) : null;
if (key!=null) {
keyedLen++;
keyed[key] = child;
}
else if (hydrating || absorb || props || child instanceof Text) {
children[childrenLen++] = child;
}
}
}
if (vlen) {
for (let i=0; i<vlen; i++) {
vchild = vchildren[i];
child = null;
// if (isFunctionalComponent(vchild)) {
// vchild = buildFunctionalComponent(vchild);
// }
// attempt to find a node based on key matching
let key = vchild.key;
if (key!=null) {
if (keyedLen && key in keyed) {
child = keyed[key];
keyed[key] = undefined;
keyedLen--;
}
}
// attempt to pluck a node of the same type from the existing children
else if (!child && min<childrenLen) {
for (j=min; j<childrenLen; j++) {
c = children[j];
if (c && isSameNodeType(c, vchild)) {
child = c;
children[j] = undefined;
if (j===childrenLen-1) childrenLen--;
if (j===min) min++;
break;
}
}
}
// morph the matched/found/created DOM child to match vchild (deep)
child = idiff(child, vchild, context, mountAll);
if (child && child!==dom) {
if (i>=len) {
dom.appendChild(child);
}
else if (child!==originalChildren[i]) {
if (child===originalChildren[i+1]) {
removeNode(originalChildren[i]);
}
dom.insertBefore(child, originalChildren[i] || null);
}
}
}
}
if (keyedLen) {
for (let i in keyed) if (keyed[i]) recollectNodeTree(keyed[i]);
}
// remove orphaned children
while (min<=childrenLen) {
child = children[childrenLen--];
if (child) recollectNodeTree(child);
}
}
/** Recursively recycle (or just unmount) a node an its descendants.
* @param {Node} node DOM node to start unmount/removal from
* @param {Boolean} [unmountOnly=false] If `true`, only triggers unmount lifecycle, skips removal
*/
export function recollectNodeTree(node, unmountOnly) {
let component = node._component;
if (component) {
// if node is owned by a Component, unmount that component (ends up recursing back here)
unmountComponent(component, !unmountOnly);
}
else {
// If the node's VNode had a ref function, invoke it with null here.
// (this is part of the React spec, and smart for unsetting references)
if (node[ATTR_KEY] && node[ATTR_KEY].ref) node[ATTR_KEY].ref(null);
if (!unmountOnly) {
collectNode(node);
}
// Recollect/unmount all children.
// - we use .lastChild here because it causes less reflow than .firstChild
// - it's also cheaper than accessing the .childNodes Live NodeList
let c;
while ((c=node.lastChild)) recollectNodeTree(c, unmountOnly);
}
}
/** Apply differences in attributes from a VNode to the given DOM Element.
* @param {Element} dom Element with attributes to diff `attrs` against
* @param {Object} attrs The desired end-state key-value attribute pairs
* @param {Object} old Current/previous attributes (from previous VNode or element's prop cache)
*/
function diffAttributes(dom, attrs, old) {
// remove attributes no longer present on the vnode by setting them to undefined
let name;
for (name in old) {
if (!(attrs && name in attrs) && old[name]!=null) {
setAccessor(dom, name, old[name], old[name] = undefined, isSvgMode);
}
}
// add new & update changed attributes
if (attrs) {
for (name in attrs) {
if (name!=='children' && name!=='innerHTML' && (!(name in old) || attrs[name]!==(name==='value' || name==='checked' ? dom[name] : old[name]))) {
setAccessor(dom, name, old[name], old[name] = attrs[name], isSvgMode);
}
}
}
}
| 1 | 10,667 | This is the real culprit, as now vlen will always be a Number (and known to the compiler as such). You could probably go one step further and avoid the ToBoolean on `vchildren` as well by writing something like `vlen = (vchildren !== undefined) ? vchildren.length : 0` if that matches the contract. | preactjs-preact | js |
@@ -41,6 +41,7 @@ public class NutritionProductFragment extends BaseFragment implements CustomTabA
@BindView(R.id.listNutrientLevels) ListView lv;
@BindView(R.id.textServingSize) TextView serving;
@BindView(R.id.textCarbonFootprint) TextView carbonFootprint;
+ @BindView(R.id.txtNoDataString)TextView noTextString;
private CustomTabActivityHelper customTabActivityHelper;
private Uri nutritionScoreUri;
| 1 | package openfoodfacts.github.scrachx.openfood.fragments;
import android.content.Context;
import android.content.Intent;
import android.net.Uri;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.customtabs.CustomTabsIntent;
import android.support.v4.content.ContextCompat;
import android.text.TextUtils;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.ListView;
import android.widget.TextView;
import java.util.ArrayList;
import java.util.List;
import butterknife.BindView;
import openfoodfacts.github.scrachx.openfood.R;
import openfoodfacts.github.scrachx.openfood.models.NutrientLevelItem;
import openfoodfacts.github.scrachx.openfood.models.NutrientLevels;
import openfoodfacts.github.scrachx.openfood.models.NutrimentLevel;
import openfoodfacts.github.scrachx.openfood.models.Nutriments;
import openfoodfacts.github.scrachx.openfood.models.Product;
import openfoodfacts.github.scrachx.openfood.models.State;
import openfoodfacts.github.scrachx.openfood.utils.Utils;
import openfoodfacts.github.scrachx.openfood.views.adapters.NutrientLevelListAdapter;
import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabActivityHelper;
import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabsHelper;
import openfoodfacts.github.scrachx.openfood.views.customtabs.WebViewFallback;
import static openfoodfacts.github.scrachx.openfood.utils.Utils.bold;
import static openfoodfacts.github.scrachx.openfood.utils.Utils.getRoundNumber;
public class NutritionProductFragment extends BaseFragment implements CustomTabActivityHelper.ConnectionCallback {
@BindView(R.id.imageGrade) ImageView img;
@BindView(R.id.listNutrientLevels) ListView lv;
@BindView(R.id.textServingSize) TextView serving;
@BindView(R.id.textCarbonFootprint) TextView carbonFootprint;
private CustomTabActivityHelper customTabActivityHelper;
private Uri nutritionScoreUri;
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
return createView(inflater, container, R.layout.fragment_nutrition_product);
}
@Override
public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
super.onViewCreated(view, savedInstanceState);
Intent intent = getActivity().getIntent();
State state = (State) intent.getExtras().getSerializable("state");
final Product product = state.getProduct();
List<NutrientLevelItem> levelItem = new ArrayList<>();
Nutriments nutriments = product.getNutriments();
NutrientLevels nutrientLevels = product.getNutrientLevels();
NutrimentLevel fat = null;
NutrimentLevel saturatedFat = null;
NutrimentLevel sugars = null;
NutrimentLevel salt = null;
if(nutrientLevels != null) {
fat = nutrientLevels.getFat();
saturatedFat = nutrientLevels.getSaturatedFat();
sugars = nutrientLevels.getSugars();
salt = nutrientLevels.getSalt();
}
if (fat == null && salt == null && saturatedFat == null && sugars == null) {
levelItem.add(new NutrientLevelItem(getString(R.string.txtNoData), "", "", R.drawable.error_image));
} else {
// prefetch the uri
customTabActivityHelper = new CustomTabActivityHelper();
customTabActivityHelper.setConnectionCallback(this);
// currently only available in french translations
nutritionScoreUri = Uri.parse("https://fr.openfoodfacts.org/score-nutritionnel-france");
customTabActivityHelper.mayLaunchUrl(nutritionScoreUri, null, null);
Context context = this.getContext();
if (fat != null) {
String fatNutrimentLevel = fat.getLocalize(context);
Nutriments.Nutriment nutriment = nutriments.get(Nutriments.FAT);
levelItem.add(new NutrientLevelItem(getString(R.string.txtFat), getRoundNumber(nutriment.getFor100g()) + " " + nutriment.getUnit(), fatNutrimentLevel, fat.getImageLevel()));
}
if (saturatedFat != null) {
String saturatedFatLocalize = saturatedFat.getLocalize(context);
Nutriments.Nutriment nutriment = nutriments.get(Nutriments.SATURATED_FAT);
String saturatedFatValue = getRoundNumber(nutriment.getFor100g()) + " " + nutriment.getUnit();
levelItem.add(new NutrientLevelItem(getString(R.string.txtSaturatedFat), saturatedFatValue, saturatedFatLocalize, saturatedFat.getImageLevel()));
}
if (sugars != null) {
String sugarsLocalize = sugars.getLocalize(context);
Nutriments.Nutriment nutriment = nutriments.get(Nutriments.SUGARS);
String sugarsValue = getRoundNumber(nutriment.getFor100g()) + " " + nutriment.getUnit();
levelItem.add(new NutrientLevelItem(getString(R.string.txtSugars), sugarsValue, sugarsLocalize, sugars.getImageLevel()));
}
if (salt != null) {
String saltLocalize = salt.getLocalize(context);
Nutriments.Nutriment nutriment = nutriments.get(Nutriments.SALT);
String saltValue = getRoundNumber(nutriment.getFor100g()) + " " + nutriment.getUnit();
levelItem.add(new NutrientLevelItem(getString(R.string.txtSalt), saltValue, saltLocalize, salt.getImageLevel()));
}
img.setImageDrawable(ContextCompat.getDrawable(context, Utils.getImageGrade(product.getNutritionGradeFr())));
img.setOnClickListener(view1 -> {
CustomTabsIntent customTabsIntent = CustomTabsHelper.getCustomTabsIntent(getContext(), customTabActivityHelper.getSession());
CustomTabActivityHelper.openCustomTab(NutritionProductFragment.this.getActivity(), customTabsIntent, nutritionScoreUri, new WebViewFallback());
});
}
lv.setAdapter(new NutrientLevelListAdapter(getContext(), levelItem));
if (TextUtils.isEmpty(product.getServingSize())) {
serving.setVisibility(View.GONE);
} else {
serving.append(bold(getString(R.string.txtServingSize)));
serving.append(" ");
serving.append(product.getServingSize());
}
if (!nutriments.contains(Nutriments.CARBON_FOOTPRINT)) {
carbonFootprint.setVisibility(View.GONE);
} else {
Nutriments.Nutriment carbonFootprintNutriment = nutriments.get(Nutriments.CARBON_FOOTPRINT);
carbonFootprint.append(bold(getString(R.string.textCarbonFootprint)));
carbonFootprint.append(carbonFootprintNutriment.getFor100g());
carbonFootprint.append(carbonFootprintNutriment.getUnit());
}
}
@Override
public void onCustomTabsConnected() {
img.setClickable(true);
}
@Override
public void onCustomTabsDisconnected() {
img.setClickable(false);
}
}
| 1 | 62,661 | missing space before `TextView` | openfoodfacts-openfoodfacts-androidapp | java |
@@ -616,6 +616,16 @@ Attr_ReadValue Item::readAttr(AttrTypes_t attr, PropStream& propStream)
break;
}
+ case ATTR_OPENCONTAINER: {
+ int32_t openContainer;
+ if (!propStream.read<int32_t>(openContainer)) {
+ return ATTR_READ_ERROR;
+ }
+
+ setIntAttr(ITEM_ATTRIBUTE_OPENCONTAINER, openContainer);
+ break;
+ }
+
//these should be handled through derived classes
//If these are called then something has changed in the items.xml since the map was saved
//just read the values | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "item.h"
#include "container.h"
#include "teleport.h"
#include "trashholder.h"
#include "mailbox.h"
#include "house.h"
#include "game.h"
#include "bed.h"
#include "actions.h"
#include "spells.h"
extern Game g_game;
extern Spells* g_spells;
extern Vocations g_vocations;
Items Item::items;
Item* Item::CreateItem(const uint16_t type, uint16_t count /*= 0*/)
{
Item* newItem = nullptr;
const ItemType& it = Item::items[type];
if (it.group == ITEM_GROUP_DEPRECATED) {
return nullptr;
}
if (it.stackable && count == 0) {
count = 1;
}
if (it.id != 0) {
if (it.isDepot()) {
newItem = new DepotLocker(type);
} else if (it.isContainer()) {
newItem = new Container(type);
} else if (it.isTeleport()) {
newItem = new Teleport(type);
} else if (it.isMagicField()) {
newItem = new MagicField(type);
} else if (it.isDoor()) {
newItem = new Door(type);
} else if (it.isTrashHolder()) {
newItem = new TrashHolder(type);
} else if (it.isMailbox()) {
newItem = new Mailbox(type);
} else if (it.isBed()) {
newItem = new BedItem(type);
} else if (it.id >= 2210 && it.id <= 2212) { // magic rings
newItem = new Item(type - 3, count);
} else if (it.id == 2215 || it.id == 2216) { // magic rings
newItem = new Item(type - 2, count);
} else if (it.id >= 2202 && it.id <= 2206) { // magic rings
newItem = new Item(type - 37, count);
} else if (it.id == 2640) { // soft boots
newItem = new Item(6132, count);
} else if (it.id == 6301) { // death ring
newItem = new Item(6300, count);
} else if (it.id == 18528) { // prismatic ring
newItem = new Item(18408, count);
} else {
newItem = new Item(type, count);
}
newItem->incrementReferenceCounter();
}
return newItem;
}
Container* Item::CreateItemAsContainer(const uint16_t type, uint16_t size)
{
const ItemType& it = Item::items[type];
if (it.id == 0 || it.group == ITEM_GROUP_DEPRECATED || it.stackable || it.useable || it.moveable || it.pickupable || it.isDepot() || it.isSplash() || it.isDoor()) {
return nullptr;
}
Container* newItem = new Container(type, size);
newItem->incrementReferenceCounter();
return newItem;
}
Item* Item::CreateItem(PropStream& propStream)
{
uint16_t id;
if (!propStream.read<uint16_t>(id)) {
return nullptr;
}
switch (id) {
case ITEM_FIREFIELD_PVP_FULL:
id = ITEM_FIREFIELD_PERSISTENT_FULL;
break;
case ITEM_FIREFIELD_PVP_MEDIUM:
id = ITEM_FIREFIELD_PERSISTENT_MEDIUM;
break;
case ITEM_FIREFIELD_PVP_SMALL:
id = ITEM_FIREFIELD_PERSISTENT_SMALL;
break;
case ITEM_ENERGYFIELD_PVP:
id = ITEM_ENERGYFIELD_PERSISTENT;
break;
case ITEM_POISONFIELD_PVP:
id = ITEM_POISONFIELD_PERSISTENT;
break;
case ITEM_MAGICWALL:
id = ITEM_MAGICWALL_PERSISTENT;
break;
case ITEM_WILDGROWTH:
id = ITEM_WILDGROWTH_PERSISTENT;
break;
default:
break;
}
return Item::CreateItem(id, 0);
}
Item::Item(const uint16_t type, uint16_t count /*= 0*/) :
id(type)
{
const ItemType& it = items[id];
if (it.isFluidContainer() || it.isSplash()) {
setFluidType(count);
} else if (it.stackable) {
if (count != 0) {
setItemCount(count);
} else if (it.charges != 0) {
setItemCount(it.charges);
}
} else if (it.charges != 0) {
if (count != 0) {
setCharges(count);
} else {
setCharges(it.charges);
}
}
setDefaultDuration();
}
Item::Item(const Item& i) :
Thing(), id(i.id), count(i.count), loadedFromMap(i.loadedFromMap)
{
if (i.attributes) {
attributes.reset(new ItemAttributes(*i.attributes));
}
}
Item* Item::clone() const
{
Item* item = Item::CreateItem(id, count);
if (attributes) {
item->attributes.reset(new ItemAttributes(*attributes));
if (item->getDuration() > 0) {
item->incrementReferenceCounter();
item->setDecaying(DECAYING_TRUE);
g_game.toDecayItems.push_front(item);
}
}
return item;
}
bool Item::equals(const Item* otherItem) const
{
if (!otherItem || id != otherItem->id) {
return false;
}
const auto& otherAttributes = otherItem->attributes;
if (!attributes) {
return !otherAttributes || (otherAttributes->attributeBits == 0);
} else if (!otherAttributes) {
return (attributes->attributeBits == 0);
}
if (attributes->attributeBits != otherAttributes->attributeBits) {
return false;
}
const auto& attributeList = attributes->attributes;
const auto& otherAttributeList = otherAttributes->attributes;
for (const auto& attribute : attributeList) {
if (ItemAttributes::isStrAttrType(attribute.type)) {
for (const auto& otherAttribute : otherAttributeList) {
if (attribute.type == otherAttribute.type && *attribute.value.string != *otherAttribute.value.string) {
return false;
}
}
} else {
for (const auto& otherAttribute : otherAttributeList) {
if (attribute.type == otherAttribute.type && attribute.value.integer != otherAttribute.value.integer) {
return false;
}
}
}
}
return true;
}
void Item::setDefaultSubtype()
{
const ItemType& it = items[id];
setItemCount(1);
if (it.charges != 0) {
if (it.stackable) {
setItemCount(it.charges);
} else {
setCharges(it.charges);
}
}
}
void Item::onRemoved()
{
ScriptEnvironment::removeTempItem(this);
if (hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
g_game.removeUniqueItem(getUniqueId());
}
}
void Item::setID(uint16_t newid)
{
const ItemType& prevIt = Item::items[id];
id = newid;
const ItemType& it = Item::items[newid];
uint32_t newDuration = it.decayTime * 1000;
if (newDuration == 0 && !it.stopTime && it.decayTo < 0) {
removeAttribute(ITEM_ATTRIBUTE_DECAYSTATE);
removeAttribute(ITEM_ATTRIBUTE_DURATION);
}
removeAttribute(ITEM_ATTRIBUTE_CORPSEOWNER);
if (newDuration > 0 && (!prevIt.stopTime || !hasAttribute(ITEM_ATTRIBUTE_DURATION))) {
setDecaying(DECAYING_FALSE);
setDuration(newDuration);
}
}
Cylinder* Item::getTopParent()
{
Cylinder* aux = getParent();
Cylinder* prevaux = dynamic_cast<Cylinder*>(this);
if (!aux) {
return prevaux;
}
while (aux->getParent() != nullptr) {
prevaux = aux;
aux = aux->getParent();
}
if (prevaux) {
return prevaux;
}
return aux;
}
const Cylinder* Item::getTopParent() const
{
const Cylinder* aux = getParent();
const Cylinder* prevaux = dynamic_cast<const Cylinder*>(this);
if (!aux) {
return prevaux;
}
while (aux->getParent() != nullptr) {
prevaux = aux;
aux = aux->getParent();
}
if (prevaux) {
return prevaux;
}
return aux;
}
Tile* Item::getTile()
{
Cylinder* cylinder = getTopParent();
//get root cylinder
if (cylinder && cylinder->getParent()) {
cylinder = cylinder->getParent();
}
return dynamic_cast<Tile*>(cylinder);
}
const Tile* Item::getTile() const
{
const Cylinder* cylinder = getTopParent();
//get root cylinder
if (cylinder && cylinder->getParent()) {
cylinder = cylinder->getParent();
}
return dynamic_cast<const Tile*>(cylinder);
}
uint16_t Item::getSubType() const
{
const ItemType& it = items[id];
if (it.isFluidContainer() || it.isSplash()) {
return getFluidType();
} else if (it.stackable) {
return count;
} else if (it.charges != 0) {
return getCharges();
}
return count;
}
Player* Item::getHoldingPlayer() const
{
Cylinder* p = getParent();
while (p) {
if (p->getCreature()) {
return p->getCreature()->getPlayer();
}
p = p->getParent();
}
return nullptr;
}
void Item::setSubType(uint16_t n)
{
const ItemType& it = items[id];
if (it.isFluidContainer() || it.isSplash()) {
setFluidType(n);
} else if (it.stackable) {
setItemCount(n);
} else if (it.charges != 0) {
setCharges(n);
} else {
setItemCount(n);
}
}
Attr_ReadValue Item::readAttr(AttrTypes_t attr, PropStream& propStream)
{
switch (attr) {
case ATTR_COUNT:
case ATTR_RUNE_CHARGES: {
uint8_t count;
if (!propStream.read<uint8_t>(count)) {
return ATTR_READ_ERROR;
}
setSubType(count);
break;
}
case ATTR_ACTION_ID: {
uint16_t actionId;
if (!propStream.read<uint16_t>(actionId)) {
return ATTR_READ_ERROR;
}
setActionId(actionId);
break;
}
case ATTR_UNIQUE_ID: {
uint16_t uniqueId;
if (!propStream.read<uint16_t>(uniqueId)) {
return ATTR_READ_ERROR;
}
setUniqueId(uniqueId);
break;
}
case ATTR_TEXT: {
std::string text;
if (!propStream.readString(text)) {
return ATTR_READ_ERROR;
}
setText(text);
break;
}
case ATTR_WRITTENDATE: {
uint32_t writtenDate;
if (!propStream.read<uint32_t>(writtenDate)) {
return ATTR_READ_ERROR;
}
setDate(writtenDate);
break;
}
case ATTR_WRITTENBY: {
std::string writer;
if (!propStream.readString(writer)) {
return ATTR_READ_ERROR;
}
setWriter(writer);
break;
}
case ATTR_DESC: {
std::string text;
if (!propStream.readString(text)) {
return ATTR_READ_ERROR;
}
setSpecialDescription(text);
break;
}
case ATTR_CHARGES: {
uint16_t charges;
if (!propStream.read<uint16_t>(charges)) {
return ATTR_READ_ERROR;
}
setSubType(charges);
break;
}
case ATTR_DURATION: {
int32_t duration;
if (!propStream.read<int32_t>(duration)) {
return ATTR_READ_ERROR;
}
setDuration(std::max<int32_t>(0, duration));
break;
}
case ATTR_DECAYING_STATE: {
uint8_t state;
if (!propStream.read<uint8_t>(state)) {
return ATTR_READ_ERROR;
}
if (state != DECAYING_FALSE) {
setDecaying(DECAYING_PENDING);
}
break;
}
case ATTR_NAME: {
std::string name;
if (!propStream.readString(name)) {
return ATTR_READ_ERROR;
}
setStrAttr(ITEM_ATTRIBUTE_NAME, name);
break;
}
case ATTR_ARTICLE: {
std::string article;
if (!propStream.readString(article)) {
return ATTR_READ_ERROR;
}
setStrAttr(ITEM_ATTRIBUTE_ARTICLE, article);
break;
}
case ATTR_PLURALNAME: {
std::string pluralName;
if (!propStream.readString(pluralName)) {
return ATTR_READ_ERROR;
}
setStrAttr(ITEM_ATTRIBUTE_PLURALNAME, pluralName);
break;
}
case ATTR_WEIGHT: {
uint32_t weight;
if (!propStream.read<uint32_t>(weight)) {
return ATTR_READ_ERROR;
}
setIntAttr(ITEM_ATTRIBUTE_WEIGHT, weight);
break;
}
case ATTR_ATTACK: {
int32_t attack;
if (!propStream.read<int32_t>(attack)) {
return ATTR_READ_ERROR;
}
setIntAttr(ITEM_ATTRIBUTE_ATTACK, attack);
break;
}
case ATTR_ATTACK_SPEED: {
uint32_t attackSpeed;
if (!propStream.read<uint32_t>(attackSpeed)) {
return ATTR_READ_ERROR;
}
setIntAttr(ITEM_ATTRIBUTE_ATTACK_SPEED, attackSpeed);
break;
}
case ATTR_DEFENSE: {
int32_t defense;
if (!propStream.read<int32_t>(defense)) {
return ATTR_READ_ERROR;
}
setIntAttr(ITEM_ATTRIBUTE_DEFENSE, defense);
break;
}
case ATTR_EXTRADEFENSE: {
int32_t extraDefense;
if (!propStream.read<int32_t>(extraDefense)) {
return ATTR_READ_ERROR;
}
setIntAttr(ITEM_ATTRIBUTE_EXTRADEFENSE, extraDefense);
break;
}
case ATTR_ARMOR: {
int32_t armor;
if (!propStream.read<int32_t>(armor)) {
return ATTR_READ_ERROR;
}
setIntAttr(ITEM_ATTRIBUTE_ARMOR, armor);
break;
}
case ATTR_HITCHANCE: {
int8_t hitChance;
if (!propStream.read<int8_t>(hitChance)) {
return ATTR_READ_ERROR;
}
setIntAttr(ITEM_ATTRIBUTE_HITCHANCE, hitChance);
break;
}
case ATTR_SHOOTRANGE: {
uint8_t shootRange;
if (!propStream.read<uint8_t>(shootRange)) {
return ATTR_READ_ERROR;
}
setIntAttr(ITEM_ATTRIBUTE_SHOOTRANGE, shootRange);
break;
}
case ATTR_DECAYTO: {
int32_t decayTo;
if (!propStream.read<int32_t>(decayTo)) {
return ATTR_READ_ERROR;
}
setIntAttr(ITEM_ATTRIBUTE_DECAYTO, decayTo);
break;
}
case ATTR_WRAPID: {
uint16_t wrapId;
if (!propStream.read<uint16_t>(wrapId)) {
return ATTR_READ_ERROR;
}
setIntAttr(ITEM_ATTRIBUTE_WRAPID, wrapId);
break;
}
case ATTR_STOREITEM: {
uint8_t storeItem;
if (!propStream.read<uint8_t>(storeItem)) {
return ATTR_READ_ERROR;
}
setIntAttr(ITEM_ATTRIBUTE_STOREITEM, storeItem);
break;
}
//these should be handled through derived classes
//If these are called then something has changed in the items.xml since the map was saved
//just read the values
//Depot class
case ATTR_DEPOT_ID: {
if (!propStream.skip(2)) {
return ATTR_READ_ERROR;
}
break;
}
//Door class
case ATTR_HOUSEDOORID: {
if (!propStream.skip(1)) {
return ATTR_READ_ERROR;
}
break;
}
//Bed class
case ATTR_SLEEPERGUID: {
if (!propStream.skip(4)) {
return ATTR_READ_ERROR;
}
break;
}
case ATTR_SLEEPSTART: {
if (!propStream.skip(4)) {
return ATTR_READ_ERROR;
}
break;
}
//Teleport class
case ATTR_TELE_DEST: {
if (!propStream.skip(5)) {
return ATTR_READ_ERROR;
}
break;
}
//Container class
case ATTR_CONTAINER_ITEMS: {
return ATTR_READ_ERROR;
}
case ATTR_CUSTOM_ATTRIBUTES: {
uint64_t size;
if (!propStream.read<uint64_t>(size)) {
return ATTR_READ_ERROR;
}
for (uint64_t i = 0; i < size; i++) {
// Unserialize key type and value
std::string key;
if (!propStream.readString(key)) {
return ATTR_READ_ERROR;
};
// Unserialize value type and value
ItemAttributes::CustomAttribute val;
if (!val.unserialize(propStream)) {
return ATTR_READ_ERROR;
}
setCustomAttribute(key, val);
}
break;
}
default:
return ATTR_READ_ERROR;
}
return ATTR_READ_CONTINUE;
}
bool Item::unserializeAttr(PropStream& propStream)
{
uint8_t attr_type;
while (propStream.read<uint8_t>(attr_type) && attr_type != 0) {
Attr_ReadValue ret = readAttr(static_cast<AttrTypes_t>(attr_type), propStream);
if (ret == ATTR_READ_ERROR) {
return false;
} else if (ret == ATTR_READ_END) {
return true;
}
}
return true;
}
bool Item::unserializeItemNode(OTB::Loader&, const OTB::Node&, PropStream& propStream)
{
return unserializeAttr(propStream);
}
void Item::serializeAttr(PropWriteStream& propWriteStream) const
{
const ItemType& it = items[id];
if (it.stackable || it.isFluidContainer() || it.isSplash()) {
propWriteStream.write<uint8_t>(ATTR_COUNT);
propWriteStream.write<uint8_t>(getSubType());
}
uint16_t charges = getCharges();
if (charges != 0) {
propWriteStream.write<uint8_t>(ATTR_CHARGES);
propWriteStream.write<uint16_t>(charges);
}
if (it.moveable) {
uint16_t actionId = getActionId();
if (actionId != 0) {
propWriteStream.write<uint8_t>(ATTR_ACTION_ID);
propWriteStream.write<uint16_t>(actionId);
}
}
const std::string& text = getText();
if (!text.empty()) {
propWriteStream.write<uint8_t>(ATTR_TEXT);
propWriteStream.writeString(text);
}
const time_t writtenDate = getDate();
if (writtenDate != 0) {
propWriteStream.write<uint8_t>(ATTR_WRITTENDATE);
propWriteStream.write<uint32_t>(writtenDate);
}
const std::string& writer = getWriter();
if (!writer.empty()) {
propWriteStream.write<uint8_t>(ATTR_WRITTENBY);
propWriteStream.writeString(writer);
}
const std::string& specialDesc = getSpecialDescription();
if (!specialDesc.empty()) {
propWriteStream.write<uint8_t>(ATTR_DESC);
propWriteStream.writeString(specialDesc);
}
if (hasAttribute(ITEM_ATTRIBUTE_DURATION)) {
propWriteStream.write<uint8_t>(ATTR_DURATION);
propWriteStream.write<uint32_t>(getIntAttr(ITEM_ATTRIBUTE_DURATION));
}
ItemDecayState_t decayState = getDecaying();
if (decayState == DECAYING_TRUE || decayState == DECAYING_PENDING) {
propWriteStream.write<uint8_t>(ATTR_DECAYING_STATE);
propWriteStream.write<uint8_t>(decayState);
}
if (hasAttribute(ITEM_ATTRIBUTE_NAME)) {
propWriteStream.write<uint8_t>(ATTR_NAME);
propWriteStream.writeString(getStrAttr(ITEM_ATTRIBUTE_NAME));
}
if (hasAttribute(ITEM_ATTRIBUTE_ARTICLE)) {
propWriteStream.write<uint8_t>(ATTR_ARTICLE);
propWriteStream.writeString(getStrAttr(ITEM_ATTRIBUTE_ARTICLE));
}
if (hasAttribute(ITEM_ATTRIBUTE_PLURALNAME)) {
propWriteStream.write<uint8_t>(ATTR_PLURALNAME);
propWriteStream.writeString(getStrAttr(ITEM_ATTRIBUTE_PLURALNAME));
}
if (hasAttribute(ITEM_ATTRIBUTE_WEIGHT)) {
propWriteStream.write<uint8_t>(ATTR_WEIGHT);
propWriteStream.write<uint32_t>(getIntAttr(ITEM_ATTRIBUTE_WEIGHT));
}
if (hasAttribute(ITEM_ATTRIBUTE_ATTACK)) {
propWriteStream.write<uint8_t>(ATTR_ATTACK);
propWriteStream.write<int32_t>(getIntAttr(ITEM_ATTRIBUTE_ATTACK));
}
if (hasAttribute(ITEM_ATTRIBUTE_ATTACK_SPEED)) {
propWriteStream.write<uint8_t>(ATTR_ATTACK_SPEED);
propWriteStream.write<uint32_t>(getIntAttr(ITEM_ATTRIBUTE_ATTACK_SPEED));
}
if (hasAttribute(ITEM_ATTRIBUTE_DEFENSE)) {
propWriteStream.write<uint8_t>(ATTR_DEFENSE);
propWriteStream.write<int32_t>(getIntAttr(ITEM_ATTRIBUTE_DEFENSE));
}
if (hasAttribute(ITEM_ATTRIBUTE_EXTRADEFENSE)) {
propWriteStream.write<uint8_t>(ATTR_EXTRADEFENSE);
propWriteStream.write<int32_t>(getIntAttr(ITEM_ATTRIBUTE_EXTRADEFENSE));
}
if (hasAttribute(ITEM_ATTRIBUTE_ARMOR)) {
propWriteStream.write<uint8_t>(ATTR_ARMOR);
propWriteStream.write<int32_t>(getIntAttr(ITEM_ATTRIBUTE_ARMOR));
}
if (hasAttribute(ITEM_ATTRIBUTE_HITCHANCE)) {
propWriteStream.write<uint8_t>(ATTR_HITCHANCE);
propWriteStream.write<int8_t>(getIntAttr(ITEM_ATTRIBUTE_HITCHANCE));
}
if (hasAttribute(ITEM_ATTRIBUTE_SHOOTRANGE)) {
propWriteStream.write<uint8_t>(ATTR_SHOOTRANGE);
propWriteStream.write<uint8_t>(getIntAttr(ITEM_ATTRIBUTE_SHOOTRANGE));
}
if (hasAttribute(ITEM_ATTRIBUTE_DECAYTO)) {
propWriteStream.write<uint8_t>(ATTR_DECAYTO);
propWriteStream.write<int32_t>(getIntAttr(ITEM_ATTRIBUTE_DECAYTO));
}
if (hasAttribute(ITEM_ATTRIBUTE_WRAPID)) {
propWriteStream.write<uint8_t>(ATTR_WRAPID);
propWriteStream.write<uint16_t>(getIntAttr(ITEM_ATTRIBUTE_WRAPID));
}
if (hasAttribute(ITEM_ATTRIBUTE_STOREITEM)) {
propWriteStream.write<uint8_t>(ATTR_STOREITEM);
propWriteStream.write<uint8_t>(getIntAttr(ITEM_ATTRIBUTE_STOREITEM));
}
if (hasAttribute(ITEM_ATTRIBUTE_CUSTOM)) {
const ItemAttributes::CustomAttributeMap* customAttrMap = attributes->getCustomAttributeMap();
propWriteStream.write<uint8_t>(ATTR_CUSTOM_ATTRIBUTES);
propWriteStream.write<uint64_t>(static_cast<uint64_t>(customAttrMap->size()));
for (const auto &entry : *customAttrMap) {
// Serializing key type and value
propWriteStream.writeString(entry.first);
// Serializing value type and value
entry.second.serialize(propWriteStream);
}
}
}
bool Item::hasProperty(ITEMPROPERTY prop) const
{
const ItemType& it = items[id];
switch (prop) {
case CONST_PROP_BLOCKSOLID: return it.blockSolid;
case CONST_PROP_MOVEABLE: return it.moveable && !hasAttribute(ITEM_ATTRIBUTE_UNIQUEID);
case CONST_PROP_HASHEIGHT: return it.hasHeight;
case CONST_PROP_BLOCKPROJECTILE: return it.blockProjectile;
case CONST_PROP_BLOCKPATH: return it.blockPathFind;
case CONST_PROP_ISVERTICAL: return it.isVertical;
case CONST_PROP_ISHORIZONTAL: return it.isHorizontal;
case CONST_PROP_IMMOVABLEBLOCKSOLID: return it.blockSolid && (!it.moveable || hasAttribute(ITEM_ATTRIBUTE_UNIQUEID));
case CONST_PROP_IMMOVABLEBLOCKPATH: return it.blockPathFind && (!it.moveable || hasAttribute(ITEM_ATTRIBUTE_UNIQUEID));
case CONST_PROP_IMMOVABLENOFIELDBLOCKPATH: return !it.isMagicField() && it.blockPathFind && (!it.moveable || hasAttribute(ITEM_ATTRIBUTE_UNIQUEID));
case CONST_PROP_NOFIELDBLOCKPATH: return !it.isMagicField() && it.blockPathFind;
case CONST_PROP_SUPPORTHANGABLE: return it.isHorizontal || it.isVertical;
default: return false;
}
}
uint32_t Item::getWeight() const
{
uint32_t weight = getBaseWeight();
if (isStackable()) {
return weight * std::max<uint32_t>(1, getItemCount());
}
return weight;
}
std::string Item::getDescription(const ItemType& it, int32_t lookDistance,
const Item* item /*= nullptr*/, int32_t subType /*= -1*/, bool addArticle /*= true*/)
{
const std::string* text = nullptr;
std::ostringstream s;
s << getNameDescription(it, item, subType, addArticle);
if (item) {
subType = item->getSubType();
}
if (it.isRune()) {
if (it.runeLevel > 0 || it.runeMagLevel > 0) {
if (RuneSpell* rune = g_spells->getRuneSpell(it.id)) {
int32_t tmpSubType = subType;
if (item) {
tmpSubType = item->getSubType();
}
s << " (\"" << it.runeSpellName << "\"). " << (it.stackable && tmpSubType > 1 ? "They" : "It") << " can only be used by ";
const VocSpellMap& vocMap = rune->getVocMap();
std::vector<Vocation*> showVocMap;
// vocations are usually listed with the unpromoted and promoted version, the latter being
// hidden from description, so `total / 2` is most likely the amount of vocations to be shown.
showVocMap.reserve(vocMap.size() / 2);
for (const auto& voc : vocMap) {
if (voc.second) {
showVocMap.push_back(g_vocations.getVocation(voc.first));
}
}
if (!showVocMap.empty()) {
auto vocIt = showVocMap.begin(), vocLast = (showVocMap.end() - 1);
while (vocIt != vocLast) {
s << asLowerCaseString((*vocIt)->getVocName()) << "s";
if (++vocIt == vocLast) {
s << " and ";
} else {
s << ", ";
}
}
s << asLowerCaseString((*vocLast)->getVocName()) << "s";
} else {
s << "players";
}
s << " with";
if (it.runeLevel > 0) {
s << " level " << it.runeLevel;
}
if (it.runeMagLevel > 0) {
if (it.runeLevel > 0) {
s << " and";
}
s << " magic level " << it.runeMagLevel;
}
s << " or higher";
}
}
} else if (it.weaponType != WEAPON_NONE) {
bool begin = true;
if (it.weaponType == WEAPON_DISTANCE && it.ammoType != AMMO_NONE) {
s << " (Range:" << static_cast<uint16_t>(item ? item->getShootRange() : it.shootRange);
int32_t attack;
int8_t hitChance;
if (item) {
attack = item->getAttack();
hitChance = item->getHitChance();
} else {
attack = it.attack;
hitChance = it.hitChance;
}
if (attack != 0) {
s << ", Atk" << std::showpos << attack << std::noshowpos;
}
if (hitChance != 0) {
s << ", Hit%" << std::showpos << static_cast<int16_t>(hitChance) << std::noshowpos;
}
begin = false;
} else if (it.weaponType != WEAPON_AMMO) {
int32_t attack, defense, extraDefense;
if (item) {
attack = item->getAttack();
defense = item->getDefense();
extraDefense = item->getExtraDefense();
} else {
attack = it.attack;
defense = it.defense;
extraDefense = it.extraDefense;
}
if (attack != 0) {
begin = false;
s << " (Atk:" << attack;
if (it.abilities && it.abilities->elementType != COMBAT_NONE && it.abilities->elementDamage != 0) {
s << " physical + " << it.abilities->elementDamage << ' ' << getCombatName(it.abilities->elementType);
}
}
uint32_t attackSpeed = item ? item->getAttackSpeed() : it.attackSpeed;
if (attackSpeed) {
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "Atk Spd:" << (attackSpeed / 1000.) << "s";
}
if (defense != 0 || extraDefense != 0) {
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "Def:" << defense;
if (extraDefense != 0) {
s << ' ' << std::showpos << extraDefense << std::noshowpos;
}
}
}
if (it.abilities) {
for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; i++) {
if (!it.abilities->skills[i]) {
continue;
}
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << getSkillName(i) << ' ' << std::showpos << it.abilities->skills[i] << std::noshowpos;
}
for (uint8_t i = SPECIALSKILL_FIRST; i <= SPECIALSKILL_LAST; i++) {
if (!it.abilities->specialSkills[i]) {
continue;
}
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << getSpecialSkillName(i) << ' ' << std::showpos << it.abilities->specialSkills[i] << '%' << std::noshowpos;
}
if (it.abilities->stats[STAT_MAGICPOINTS]) {
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "magic level " << std::showpos << it.abilities->stats[STAT_MAGICPOINTS] << std::noshowpos;
}
int16_t show = it.abilities->absorbPercent[0];
if (show != 0) {
for (size_t i = 1; i < COMBAT_COUNT; ++i) {
if (it.abilities->absorbPercent[i] != show) {
show = 0;
break;
}
}
}
if (show == 0) {
bool tmp = true;
for (size_t i = 0; i < COMBAT_COUNT; ++i) {
if (it.abilities->absorbPercent[i] == 0) {
continue;
}
if (tmp) {
tmp = false;
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "protection ";
} else {
s << ", ";
}
s << getCombatName(indexToCombatType(i)) << ' ' << std::showpos << it.abilities->absorbPercent[i] << std::noshowpos << '%';
}
} else {
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "protection all " << std::showpos << show << std::noshowpos << '%';
}
show = it.abilities->fieldAbsorbPercent[0];
if (show != 0) {
for (size_t i = 1; i < COMBAT_COUNT; ++i) {
					if (it.abilities->fieldAbsorbPercent[i] != show) {
show = 0;
break;
}
}
}
if (show == 0) {
bool tmp = true;
for (size_t i = 0; i < COMBAT_COUNT; ++i) {
if (it.abilities->fieldAbsorbPercent[i] == 0) {
continue;
}
if (tmp) {
tmp = false;
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "protection ";
} else {
s << ", ";
}
s << getCombatName(indexToCombatType(i)) << " field " << std::showpos << it.abilities->fieldAbsorbPercent[i] << std::noshowpos << '%';
}
} else {
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "protection all fields " << std::showpos << show << std::noshowpos << '%';
}
if (it.abilities->speed) {
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "speed " << std::showpos << (it.abilities->speed >> 1) << std::noshowpos;
}
}
if (!begin) {
s << ')';
}
} else if (it.armor != 0 || (item && item->getArmor() != 0) || it.showAttributes) {
bool begin = true;
int32_t armor = (item ? item->getArmor() : it.armor);
if (armor != 0) {
s << " (Arm:" << armor;
begin = false;
}
if (it.abilities) {
for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; i++) {
if (!it.abilities->skills[i]) {
continue;
}
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << getSkillName(i) << ' ' << std::showpos << it.abilities->skills[i] << std::noshowpos;
}
if (it.abilities->stats[STAT_MAGICPOINTS]) {
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "magic level " << std::showpos << it.abilities->stats[STAT_MAGICPOINTS] << std::noshowpos;
}
int16_t show = it.abilities->absorbPercent[0];
if (show != 0) {
for (size_t i = 1; i < COMBAT_COUNT; ++i) {
if (it.abilities->absorbPercent[i] != show) {
show = 0;
break;
}
}
}
if (!show) {
bool protectionBegin = true;
for (size_t i = 0; i < COMBAT_COUNT; ++i) {
if (it.abilities->absorbPercent[i] == 0) {
continue;
}
if (protectionBegin) {
protectionBegin = false;
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "protection ";
} else {
s << ", ";
}
s << getCombatName(indexToCombatType(i)) << ' ' << std::showpos << it.abilities->absorbPercent[i] << std::noshowpos << '%';
}
} else {
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "protection all " << std::showpos << show << std::noshowpos << '%';
}
show = it.abilities->fieldAbsorbPercent[0];
if (show != 0) {
for (size_t i = 1; i < COMBAT_COUNT; ++i) {
					if (it.abilities->fieldAbsorbPercent[i] != show) {
show = 0;
break;
}
}
}
if (!show) {
bool tmp = true;
for (size_t i = 0; i < COMBAT_COUNT; ++i) {
if (it.abilities->fieldAbsorbPercent[i] == 0) {
continue;
}
if (tmp) {
tmp = false;
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "protection ";
} else {
s << ", ";
}
s << getCombatName(indexToCombatType(i)) << " field " << std::showpos << it.abilities->fieldAbsorbPercent[i] << std::noshowpos << '%';
}
} else {
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "protection all fields " << std::showpos << show << std::noshowpos << '%';
}
if (it.abilities->speed) {
if (begin) {
begin = false;
s << " (";
} else {
s << ", ";
}
s << "speed " << std::showpos << (it.abilities->speed >> 1) << std::noshowpos;
}
}
if (!begin) {
s << ')';
}
} else if (it.isContainer() || (item && item->getContainer())) {
uint32_t volume = 0;
if (!item || !item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
if (it.isContainer()) {
volume = it.maxItems;
} else {
volume = item->getContainer()->capacity();
}
}
if (volume != 0) {
s << " (Vol:" << volume << ')';
}
} else {
bool found = true;
if (it.abilities) {
if (it.abilities->speed > 0) {
s << " (speed " << std::showpos << (it.abilities->speed / 2) << std::noshowpos << ')';
} else if (hasBitSet(CONDITION_DRUNK, it.abilities->conditionSuppressions)) {
s << " (hard drinking)";
} else if (it.abilities->invisible) {
s << " (invisibility)";
} else if (it.abilities->regeneration) {
s << " (faster regeneration)";
} else if (it.abilities->manaShield) {
s << " (mana shield)";
} else {
found = false;
}
} else {
found = false;
}
if (!found) {
if (it.isKey()) {
int32_t keyNumber = (item ? item->getActionId() : 0);
if (keyNumber != 0) {
s << " (Key:" << std::setfill('0') << std::setw(4) << keyNumber << ')';
}
} else if (it.isFluidContainer()) {
if (subType > 0) {
const std::string& itemName = items[subType].name;
s << " of " << (!itemName.empty() ? itemName : "unknown");
} else {
s << ". It is empty";
}
} else if (it.isSplash()) {
s << " of ";
if (subType > 0 && !items[subType].name.empty()) {
s << items[subType].name;
} else {
s << "unknown";
}
} else if (it.allowDistRead && (it.id < 7369 || it.id > 7371)) {
s << ".\n";
if (lookDistance <= 4) {
if (item) {
text = &item->getText();
if (!text->empty()) {
const std::string& writer = item->getWriter();
if (!writer.empty()) {
s << writer << " wrote";
time_t date = item->getDate();
if (date != 0) {
s << " on " << formatDateShort(date);
}
s << ": ";
} else {
s << "You read: ";
}
s << *text;
} else {
s << "Nothing is written on it";
}
} else {
s << "Nothing is written on it";
}
} else {
s << "You are too far away to read it";
}
} else if (it.levelDoor != 0 && item) {
uint16_t actionId = item->getActionId();
if (actionId >= it.levelDoor) {
s << " for level " << (actionId - it.levelDoor);
}
}
}
}
if (it.showCharges) {
s << " that has " << subType << " charge" << (subType != 1 ? "s" : "") << " left";
}
if (it.showDuration) {
if (item && item->hasAttribute(ITEM_ATTRIBUTE_DURATION)) {
uint32_t duration = item->getDuration() / 1000;
s << " that will expire in ";
if (duration >= 86400) {
uint16_t days = duration / 86400;
uint16_t hours = (duration % 86400) / 3600;
s << days << " day" << (days != 1 ? "s" : "");
if (hours > 0) {
s << " and " << hours << " hour" << (hours != 1 ? "s" : "");
}
} else if (duration >= 3600) {
uint16_t hours = duration / 3600;
uint16_t minutes = (duration % 3600) / 60;
s << hours << " hour" << (hours != 1 ? "s" : "");
if (minutes > 0) {
s << " and " << minutes << " minute" << (minutes != 1 ? "s" : "");
}
} else if (duration >= 60) {
uint16_t minutes = duration / 60;
s << minutes << " minute" << (minutes != 1 ? "s" : "");
uint16_t seconds = duration % 60;
if (seconds > 0) {
s << " and " << seconds << " second" << (seconds != 1 ? "s" : "");
}
} else {
s << duration << " second" << (duration != 1 ? "s" : "");
}
} else {
s << " that is brand-new";
}
}
if (!it.allowDistRead || (it.id >= 7369 && it.id <= 7371)) {
s << '.';
} else {
if (!text && item) {
text = &item->getText();
}
if (!text || text->empty()) {
s << '.';
}
}
if (it.wieldInfo != 0) {
s << "\nIt can only be wielded properly by ";
if (it.wieldInfo & WIELDINFO_PREMIUM) {
s << "premium ";
}
if (!it.vocationString.empty()) {
s << it.vocationString;
} else {
s << "players";
}
if (it.wieldInfo & WIELDINFO_LEVEL) {
s << " of level " << it.minReqLevel << " or higher";
}
if (it.wieldInfo & WIELDINFO_MAGLV) {
if (it.wieldInfo & WIELDINFO_LEVEL) {
s << " and";
} else {
s << " of";
}
s << " magic level " << it.minReqMagicLevel << " or higher";
}
s << '.';
}
if (lookDistance <= 1) {
if (item) {
const uint32_t weight = item->getWeight();
if (weight != 0 && it.pickupable) {
s << '\n' << getWeightDescription(it, weight, item->getItemCount());
}
} else if (it.weight != 0 && it.pickupable) {
s << '\n' << getWeightDescription(it, it.weight);
}
}
if (item) {
const std::string& specialDescription = item->getSpecialDescription();
if (!specialDescription.empty()) {
s << '\n' << specialDescription;
} else if (lookDistance <= 1 && !it.description.empty()) {
s << '\n' << it.description;
}
} else if (lookDistance <= 1 && !it.description.empty()) {
s << '\n' << it.description;
}
if (it.allowDistRead && it.id >= 7369 && it.id <= 7371) {
if (!text && item) {
text = &item->getText();
}
if (text && !text->empty()) {
s << '\n' << *text;
}
}
return s.str();
}
std::string Item::getDescription(int32_t lookDistance) const
{
const ItemType& it = items[id];
return getDescription(it, lookDistance, this);
}
std::string Item::getNameDescription(const ItemType& it, const Item* item /*= nullptr*/, int32_t subType /*= -1*/, bool addArticle /*= true*/)
{
if (item) {
subType = item->getSubType();
}
std::ostringstream s;
const std::string& name = (item ? item->getName() : it.name);
if (!name.empty()) {
if (it.stackable && subType > 1) {
if (it.showCount) {
s << subType << ' ';
}
s << (item ? item->getPluralName() : it.getPluralName());
} else {
if (addArticle) {
const std::string& article = (item ? item->getArticle() : it.article);
if (!article.empty()) {
s << article << ' ';
}
}
s << name;
}
} else {
if (addArticle) {
s << "an ";
}
s << "item of type " << it.id;
}
return s.str();
}
std::string Item::getNameDescription() const
{
const ItemType& it = items[id];
return getNameDescription(it, this);
}
std::string Item::getWeightDescription(const ItemType& it, uint32_t weight, uint32_t count /*= 1*/)
{
std::ostringstream ss;
if (it.stackable && count > 1 && it.showCount != 0) {
ss << "They weigh ";
} else {
ss << "It weighs ";
}
if (weight < 10) {
ss << "0.0" << weight;
} else if (weight < 100) {
ss << "0." << weight;
} else {
std::string weightString = std::to_string(weight);
weightString.insert(weightString.end() - 2, '.');
ss << weightString;
}
ss << " oz.";
return ss.str();
}
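// Added worked examples (not part of the original file) for the formatting above,
// which treats the integer weight as hundredths of an ounce:
//   weight 7    -> "It weighs 0.07 oz."
//   weight 85   -> "It weighs 0.85 oz."
//   weight 1250 -> "It weighs 12.50 oz."  (a '.' is inserted before the last two digits)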
std::string Item::getWeightDescription(uint32_t weight) const
{
const ItemType& it = Item::items[id];
return getWeightDescription(it, weight, getItemCount());
}
std::string Item::getWeightDescription() const
{
uint32_t weight = getWeight();
if (weight == 0) {
return std::string();
}
return getWeightDescription(weight);
}
void Item::setUniqueId(uint16_t n)
{
if (hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
return;
}
if (g_game.addUniqueItem(n, this)) {
getAttributes()->setUniqueId(n);
}
}
bool Item::canDecay() const
{
if (isRemoved()) {
return false;
}
const ItemType& it = Item::items[id];
if (getDecayTo() < 0 || it.decayTime == 0) {
return false;
}
if (hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
return false;
}
return true;
}
uint32_t Item::getWorth() const
{
switch (id) {
case ITEM_GOLD_COIN:
return count;
case ITEM_PLATINUM_COIN:
return count * 100;
case ITEM_CRYSTAL_COIN:
return count * 10000;
default:
return 0;
}
}
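// Added example (not part of the original file): a stack of 3 platinum coins is worth
// 3 * 100 = 300 gold, 2 crystal coins are worth 2 * 10000 = 20000 gold, and any other
// item id is worth 0.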
LightInfo Item::getLightInfo() const
{
const ItemType& it = items[id];
return {it.lightLevel, it.lightColor};
}
std::string ItemAttributes::emptyString;
int64_t ItemAttributes::emptyInt;
double ItemAttributes::emptyDouble;
bool ItemAttributes::emptyBool;
const std::string& ItemAttributes::getStrAttr(itemAttrTypes type) const
{
if (!isStrAttrType(type)) {
return emptyString;
}
const Attribute* attr = getExistingAttr(type);
if (!attr) {
return emptyString;
}
return *attr->value.string;
}
void ItemAttributes::setStrAttr(itemAttrTypes type, const std::string& value)
{
if (!isStrAttrType(type)) {
return;
}
if (value.empty()) {
return;
}
Attribute& attr = getAttr(type);
delete attr.value.string;
attr.value.string = new std::string(value);
}
void ItemAttributes::removeAttribute(itemAttrTypes type)
{
if (!hasAttribute(type)) {
return;
}
auto prev_it = attributes.rbegin();
if ((*prev_it).type == type) {
attributes.pop_back();
} else {
auto it = prev_it, end = attributes.rend();
while (++it != end) {
if ((*it).type == type) {
(*it) = attributes.back();
attributes.pop_back();
break;
}
}
}
attributeBits &= ~type;
}
int64_t ItemAttributes::getIntAttr(itemAttrTypes type) const
{
if (!isIntAttrType(type)) {
return 0;
}
const Attribute* attr = getExistingAttr(type);
if (!attr) {
return 0;
}
return attr->value.integer;
}
void ItemAttributes::setIntAttr(itemAttrTypes type, int64_t value)
{
if (!isIntAttrType(type)) {
return;
}
if (type == ITEM_ATTRIBUTE_ATTACK_SPEED && value < 100) {
value = 100;
}
getAttr(type).value.integer = value;
}
void ItemAttributes::increaseIntAttr(itemAttrTypes type, int64_t value)
{
setIntAttr(type, getIntAttr(type) + value);
}
const ItemAttributes::Attribute* ItemAttributes::getExistingAttr(itemAttrTypes type) const
{
if (hasAttribute(type)) {
for (const Attribute& attribute : attributes) {
if (attribute.type == type) {
return &attribute;
}
}
}
return nullptr;
}
ItemAttributes::Attribute& ItemAttributes::getAttr(itemAttrTypes type)
{
if (hasAttribute(type)) {
for (Attribute& attribute : attributes) {
if (attribute.type == type) {
return attribute;
}
}
}
attributeBits |= type;
attributes.emplace_back(type);
return attributes.back();
}
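// Added illustrative note (not part of the original file): attributes are created
// lazily. A call such as setIntAttr(ITEM_ATTRIBUTE_ATTACK, 50) reaches getAttr(),
// which sets the ITEM_ATTRIBUTE_ATTACK bit in attributeBits and appends an Attribute
// entry; later reads check the bit first (getExistingAttr) and only then scan the
// small vector, so items without extra attributes stay cheap.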
void Item::startDecaying()
{
g_game.startDecay(this);
}
bool Item::hasMarketAttributes() const
{
if (attributes == nullptr) {
return true;
}
for (const auto& attr : attributes->getList()) {
if (attr.type == ITEM_ATTRIBUTE_CHARGES) {
uint16_t charges = static_cast<uint16_t>(attr.value.integer);
if (charges != items[id].charges) {
return false;
}
} else if (attr.type == ITEM_ATTRIBUTE_DURATION) {
uint32_t duration = static_cast<uint32_t>(attr.value.integer);
if (duration != getDefaultDuration()) {
return false;
}
} else {
return false;
}
}
return true;
}
template<>
const std::string& ItemAttributes::CustomAttribute::get<std::string>() {
if (value.type() == typeid(std::string)) {
return boost::get<std::string>(value);
}
return emptyString;
}
template<>
const int64_t& ItemAttributes::CustomAttribute::get<int64_t>() {
if (value.type() == typeid(int64_t)) {
return boost::get<int64_t>(value);
}
return emptyInt;
}
template<>
const double& ItemAttributes::CustomAttribute::get<double>() {
if (value.type() == typeid(double)) {
return boost::get<double>(value);
}
return emptyDouble;
}
template<>
const bool& ItemAttributes::CustomAttribute::get<bool>() {
if (value.type() == typeid(bool)) {
return boost::get<bool>(value);
}
return emptyBool;
}
| 1 | 19,599 | so this attribute is used as u8 in code, but saves as int32? Why? | otland-forgottenserver | cpp |
@@ -41,7 +41,7 @@ from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEnco
# Map class to Cap'n Proto schema union attribute
_CLASS_ATTR_MAP = {
ScalarEncoder: "scalarEncoder",
- AdaptiveScalarEncoder: "adaptivescalar",
+ AdaptiveScalarEncoder: "adaptiveScalarEncoder",
DateEncoder: "dateEncoder",
LogEncoder: "logEncoder",
CategoryEncoder: "categoryEncoder", | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.encoders.base import Encoder
from nupic.encoders.scalar import ScalarEncoder
from nupic.encoders.adaptivescalar import AdaptiveScalarEncoder
from nupic.encoders.date import DateEncoder
from nupic.encoders.logenc import LogEncoder
from nupic.encoders.category import CategoryEncoder
from nupic.encoders.sdrcategory import SDRCategoryEncoder
from nupic.encoders.delta import DeltaEncoder
from nupic.encoders.scalarspace import ScalarSpaceEncoder
from nupic.encoders.pass_through_encoder import PassThroughEncoder
from nupic.encoders.sparse_pass_through_encoder import SparsePassThroughEncoder
from nupic.encoders.coordinate import CoordinateEncoder
from nupic.encoders.geospatial_coordinate import GeospatialCoordinateEncoder
# multiencoder must be imported last because it imports * from this module!
from nupic.encoders.utils import bitsToString
from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder
# Map class to Cap'n Proto schema union attribute
_CLASS_ATTR_MAP = {
ScalarEncoder: "scalarEncoder",
AdaptiveScalarEncoder: "adaptivescalar",
DateEncoder: "dateEncoder",
LogEncoder: "logEncoder",
CategoryEncoder: "categoryEncoder",
CoordinateEncoder: "coordinateEncoder",
SDRCategoryEncoder: "sdrCategoryEncoder",
DeltaEncoder: "deltaEncoder",
PassThroughEncoder: "passThroughEncoder",
SparsePassThroughEncoder: "sparsePassThroughEncoder",
RandomDistributedScalarEncoder: "randomDistributedScalarEncoder"
}
# Invert for fast lookup in MultiEncoder.read()
_ATTR_CLASS_MAP = {value:key for key, value in _CLASS_ATTR_MAP.items()}
class MultiEncoder(Encoder):
"""A MultiEncoder encodes a dictionary or object with
  multiple components. A MultiEncoder contains a number
of sub-encoders, each of which encodes a separate component."""
# TODO expand this docstring to explain how the multiple encoders are combined
def __init__(self, encoderDescriptions=None):
self.width = 0
self.encoders = []
self.description = []
self.name = ''
if encoderDescriptions is not None:
self.addMultipleEncoders(encoderDescriptions)
def setFieldStats(self, fieldName, fieldStatistics ):
for (name, encoder, offset) in self.encoders:
encoder.setFieldStats(name, fieldStatistics)
def addEncoder(self, name, encoder):
self.encoders.append((name, encoder, self.width))
for d in encoder.getDescription():
self.description.append((d[0], d[1] + self.width))
self.width += encoder.getWidth()
self._flattenedEncoderList = None
self._flattenedFieldTypeList = None
def encodeIntoArray(self, obj, output):
for name, encoder, offset in self.encoders:
encoder.encodeIntoArray(self._getInputValue(obj, name), output[offset:])
def getDescription(self):
return self.description
def getWidth(self):
"""Represents the sum of the widths of each fields encoding."""
return self.width
def setLearning(self,learningEnabled):
encoders = self.getEncoderList()
for encoder in encoders:
encoder.setLearning(learningEnabled)
return
def encodeField(self, fieldName, value):
for name, encoder, offset in self.encoders:
if name == fieldName:
return encoder.encode(value)
def encodeEachField(self, inputRecord):
encodings = []
for name, encoder, offset in self.encoders:
encodings.append(encoder.encode(getattr(inputRecord, name)))
return encodings
def addMultipleEncoders(self, fieldEncodings):
"""
fieldEncodings -- a dict of dicts, mapping field names to the field params
dict.
Each field params dict has the following keys
1) data fieldname that matches the key ('fieldname')
2) an encoder type ('type')
3) and the encoder params (all other keys)
For example,
fieldEncodings={
'dateTime': dict(fieldname='dateTime', type='DateEncoder',
timeOfDay=(5,5)),
'attendeeCount': dict(fieldname='attendeeCount', type='ScalarEncoder',
name='attendeeCount', minval=0, maxval=250,
clipInput=True, w=5, resolution=10),
'consumption': dict(fieldname='consumption',type='ScalarEncoder',
name='consumption', minval=0,maxval=110,
clipInput=True, w=5, resolution=5),
}
would yield a vector with a part encoded by the DateEncoder,
    and two parts separately taken care of by the ScalarEncoder with the specified parameters.
    The three separate encodings are then merged together into the final vector, in such a way that
they are always at the same location within the vector.
"""
# Sort the encoders so that they end up in a controlled order
encoderList = sorted(fieldEncodings.items())
for key, fieldParams in encoderList:
if ':' not in key and fieldParams is not None:
fieldParams = fieldParams.copy()
fieldName = fieldParams.pop('fieldname')
encoderName = fieldParams.pop('type')
try:
self.addEncoder(fieldName, eval(encoderName)(**fieldParams))
except TypeError, e:
print ("#### Error in constructing %s encoder. Possibly missing "
"some required constructor parameters. Parameters "
"that were provided are: %s" % (encoderName, fieldParams))
raise
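  # Added illustrative usage sketch (not part of the original module; the field name
  # and parameter values are made up):
  #
  #   me = MultiEncoder({
  #     'consumption': dict(fieldname='consumption', type='ScalarEncoder',
  #                         name='consumption', minval=0, maxval=110,
  #                         clipInput=True, w=5, resolution=5),
  #   })
  #   sdr = me.encodeField('consumption', 42.0)  # encoding of a single field
  #   width = me.getWidth()                      # total width of all sub-encoders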
@classmethod
def read(cls, proto):
encoder = object.__new__(cls)
encoder.encoders = [None] * len(proto.encoders)
encoder.width = 0
for index, encoderProto in enumerate(proto.encoders):
# Identify which attr is set in union
encoderType = encoderProto.which()
encoderDetails = getattr(encoderProto, encoderType)
encoder.encoders[index] = (
encoderProto.name,
# Call class.read() where class is determined by _ATTR_CLASS_MAP
_ATTR_CLASS_MAP.get(encoderType).read(encoderDetails),
encoderProto.offset
)
encoder.width += encoder.encoders[index][1].getWidth()
# Derive description from encoder list
encoder.description = [(enc[1].name, enc[2]) for enc in encoder.encoders]
encoder.name = proto.name
return encoder
def write(self, proto):
proto.init("encoders", len(self.encoders))
for index, (name, encoder, offset) in enumerate(self.encoders):
encoderProto = proto.encoders[index]
encoderType = _CLASS_ATTR_MAP.get(encoder.__class__)
encoderProto.init(encoderType)
encoderDetails = getattr(encoderProto, encoderType)
encoder.write(encoderDetails)
encoderProto.name = name
encoderProto.offset = offset
proto.name = self.name
| 1 | 20,204 | can RDSE be added to the list? It has its capnp files. | numenta-nupic | py |
@@ -35,11 +35,11 @@ import (
// certificateToInjectableFunc converts a given certificate to the reconcile requests for the corresponding injectables
// (webhooks, api services, etc) that reference it.
-type certificateToInjectableFunc func(log logr.Logger, cl client.Client, certName types.NamespacedName) []ctrl.Request
+type certificateToInjectableFunc func(log logr.Logger, cl client.Reader, certName types.NamespacedName) []ctrl.Request
// buildCertToInjectableFunc creates a certificateToInjectableFunc that maps from certificates to the given type of injectable.
func buildCertToInjectableFunc(listTyp runtime.Object, resourceName string) certificateToInjectableFunc {
- return func(log logr.Logger, cl client.Client, certName types.NamespacedName) []ctrl.Request {
+ return func(log logr.Logger, cl client.Reader, certName types.NamespacedName) []ctrl.Request {
log = log.WithValues("type", resourceName)
objs := listTyp.DeepCopyObject()
if err := cl.List(context.Background(), objs, client.MatchingFields{injectFromPath: certName.String()}); err != nil { | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cainjector
import (
"context"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
)
// setup for indexers used to trigger reconciliation on injected CA data.
// certificateToInjectableFunc converts a given certificate to the reconcile requests for the corresponding injectables
// (webhooks, api services, etc) that reference it.
type certificateToInjectableFunc func(log logr.Logger, cl client.Client, certName types.NamespacedName) []ctrl.Request
// buildCertToInjectableFunc creates a certificateToInjectableFunc that maps from certificates to the given type of injectable.
func buildCertToInjectableFunc(listTyp runtime.Object, resourceName string) certificateToInjectableFunc {
return func(log logr.Logger, cl client.Client, certName types.NamespacedName) []ctrl.Request {
log = log.WithValues("type", resourceName)
objs := listTyp.DeepCopyObject()
if err := cl.List(context.Background(), objs, client.MatchingFields{injectFromPath: certName.String()}); err != nil {
log.Error(err, "unable to fetch injectables associated with certificate")
return nil
}
var reqs []ctrl.Request
if err := meta.EachListItem(objs, func(obj runtime.Object) error {
metaInfo, err := meta.Accessor(obj)
if err != nil {
log.Error(err, "unable to get metadata from list item")
// continue on error
return nil
}
reqs = append(reqs, ctrl.Request{NamespacedName: types.NamespacedName{
Name: metaInfo.GetName(),
Namespace: metaInfo.GetNamespace(),
}})
return nil
}); err != nil {
log.Error(err, "unable get items from list")
return nil
}
return reqs
}
}
// secretForCertificateMapper is a Mapper that converts secrets up to injectables, through certificates.
type secretForCertificateMapper struct {
client.Client
log logr.Logger
certificateToInjectable certificateToInjectableFunc
}
func (m *secretForCertificateMapper) Map(obj handler.MapObject) []ctrl.Request {
// grab the certificate, if it exists
certName := OwningCertForSecret(obj.Object.(*corev1.Secret))
if certName == nil {
return nil
}
secretName := types.NamespacedName{Name: obj.Meta.GetName(), Namespace: obj.Meta.GetNamespace()}
log := m.log.WithValues("secret", secretName, "certificate", *certName)
var cert cmapi.Certificate
	// confirm that the owning certificate actually exists
if err := m.Client.Get(context.Background(), *certName, &cert); err != nil {
// TODO(directxman12): check for not found error?
log.Error(err, "unable to fetch certificate that owns the secret")
return nil
}
return m.certificateToInjectable(log, m.Client, *certName)
}
// certMapper is a mapper that converts Certificates up to injectables
type certMapper struct {
client.Client
log logr.Logger
toInjectable certificateToInjectableFunc
}
func (m *certMapper) Map(obj handler.MapObject) []ctrl.Request {
certName := types.NamespacedName{Name: obj.Meta.GetName(), Namespace: obj.Meta.GetNamespace()}
log := m.log.WithValues("certificate", certName)
return m.toInjectable(log, m.Client, certName)
}
var (
// injectFromPath is the index key used to look up the value of inject-ca-from on targeted objects
injectFromPath = ".metadata.annotations.inject-ca-from"
)
// injectableCAFromIndexer is an IndexerFunc indexing on certificates
// referenced by injectables.
func injectableCAFromIndexer(rawObj runtime.Object) []string {
metaInfo, err := meta.Accessor(rawObj)
if err != nil {
return nil
}
// skip invalid certificate names
certNameRaw := metaInfo.GetAnnotations()[cmapi.WantInjectAnnotation]
if certNameRaw == "" {
return nil
}
certName := splitNamespacedName(certNameRaw)
if certName.Namespace == "" {
return nil
}
return []string{certNameRaw}
}
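// Added illustrative note (not part of the original file): for the MatchingFields
// lookups above to resolve, each injectable type is expected to be registered with
// the manager's field indexer under injectFromPath, roughly (hypothetical wiring,
// the exact IndexField signature depends on the controller-runtime version in use):
//
//	mgr.GetFieldIndexer().IndexField(&admissionreg.ValidatingWebhookConfiguration{},
//		injectFromPath, injectableCAFromIndexer)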
// secretToInjectableFunc converts a given secret to the reconcile requests for the corresponding injectables
// (webhooks, api services, etc) that reference it.
type secretToInjectableFunc func(log logr.Logger, cl client.Client, secretName types.NamespacedName) []ctrl.Request
// buildSecretToInjectableFunc creates a secretToInjectableFunc that maps from secrets to the given type of injectable.
func buildSecretToInjectableFunc(listTyp runtime.Object, resourceName string) secretToInjectableFunc {
return func(log logr.Logger, cl client.Client, secretName types.NamespacedName) []ctrl.Request {
log = log.WithValues("type", resourceName)
objs := listTyp.DeepCopyObject()
if err := cl.List(context.Background(), objs, client.MatchingFields{injectFromSecretPath: secretName.String()}); err != nil {
log.Error(err, "unable to fetch injectables associated with secret")
return nil
}
var reqs []ctrl.Request
if err := meta.EachListItem(objs, func(obj runtime.Object) error {
metaInfo, err := meta.Accessor(obj)
if err != nil {
log.Error(err, "unable to get metadata from list item")
// continue on error
return nil
}
reqs = append(reqs, ctrl.Request{NamespacedName: types.NamespacedName{
Name: metaInfo.GetName(),
Namespace: metaInfo.GetNamespace(),
}})
return nil
}); err != nil {
log.Error(err, "unable get items from list")
return nil
}
return reqs
}
}
// secretForInjectableMapper is a Mapper that converts secrets to injectables
// via the 'inject-ca-from-secret' annotation
type secretForInjectableMapper struct {
client.Client
log logr.Logger
secretToInjectable secretToInjectableFunc
}
func (m *secretForInjectableMapper) Map(obj handler.MapObject) []ctrl.Request {
secretName := types.NamespacedName{Namespace: obj.Meta.GetNamespace(), Name: obj.Meta.GetName()}
log := m.log.WithValues("secret", secretName)
return m.secretToInjectable(log, m.Client, secretName)
}
var (
// injectFromSecretPath is the index key used to look up the value of
// inject-ca-from-secret on targeted objects
injectFromSecretPath = ".metadata.annotations.inject-ca-from-secret"
)
// injectableCAFromSecretIndexer is an IndexerFunc indexing on secrets
// referenced by injectables.
func injectableCAFromSecretIndexer(rawObj runtime.Object) []string {
metaInfo, err := meta.Accessor(rawObj)
if err != nil {
return nil
}
// skip invalid secret names
secretNameRaw := metaInfo.GetAnnotations()[cmapi.WantInjectFromSecretAnnotation]
if secretNameRaw == "" {
return nil
}
secretName := splitNamespacedName(secretNameRaw)
if secretName.Namespace == "" {
return nil
}
return []string{secretNameRaw}
}
| 1 | 23,405 | These functions only ever read from the API and should always be reading from a cache, so I have made that explicit by expecting a Reader here and supplying a `cache.Cache` as the client. | jetstack-cert-manager | go |
@@ -1495,6 +1495,7 @@ mangle_direct_call(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
if (target == (app_pc)retaddr) {
LOG(THREAD, LOG_INTERP, 3, "found call to next instruction " PFX "\n", target);
} else {
+ /* XXX i#3307: necessary to instr_set_translation_mangling_epilogue? */
check_return_handle_call(dcontext, ilist, next_instr);
}
/* now do the normal thing for a call */ | 1 | /* ******************************************************************************
* Copyright (c) 2010-2018 Google, Inc. All rights reserved.
* Copyright (c) 2010 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* ******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/* file "mangle.c" */
#include "../globals.h"
#include "../link.h"
#include "../fragment.h"
#include "arch.h"
#include "instr.h"
#include "instr_create.h"
#include "instrlist.h"
#include "decode.h"
#include "decode_fast.h"
#include "disassemble.h"
#include "../hashtable.h"
#include "../fcache.h" /* for in_fcache */
#ifdef STEAL_REGISTER
# include "steal_reg.h"
#endif
#include "instrument.h" /* for dr_insert_call */
#include "../translate.h"
#ifdef RCT_IND_BRANCH
# include "../rct.h" /* rct_add_rip_rel_addr */
#endif
#ifdef UNIX
# include <sys/syscall.h>
#endif
#include <string.h> /* for memset */
#ifdef ANNOTATIONS
# include "../annotations.h"
#endif
/* Make code more readable by shortening long lines.
* We mark everything we add as non-app instr.
*/
#define POST instrlist_meta_postinsert
#define PRE instrlist_meta_preinsert
/***************************************************************************/
void
mangle_arch_init(void)
{
/* Nothing yet. */
}
/* Convert a short-format CTI into an equivalent one using
* near-rel-format.
* Remember, the target is kept in the 0th src array position,
* and has already been converted from an 8-bit offset to an
* absolute PC, so we can just pretend instructions are longer
* than they really are.
*/
instr_t *
convert_to_near_rel_arch(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr)
{
int opcode = instr_get_opcode(instr);
app_pc target = NULL;
if (opcode == OP_jmp_short) {
instr_set_opcode(instr, OP_jmp);
return instr;
}
if (OP_jo_short <= opcode && opcode <= OP_jnle_short) {
/* WARNING! following is OP_ enum order specific */
instr_set_opcode(instr, opcode - OP_jo_short + OP_jo);
return instr;
}
if (OP_loopne <= opcode && opcode <= OP_jecxz) {
uint mangled_sz;
uint offs;
/*
* from "info as" on GNU/linux system:
Note that the `jcxz', `jecxz', `loop', `loopz', `loope', `loopnz'
and `loopne' instructions only come in byte displacements, so that if
you use these instructions (`gcc' does not use them) you may get an
error message (and incorrect code). The AT&T 80386 assembler tries to
get around this problem by expanding `jcxz foo' to
jcxz cx_zero
jmp cx_nonzero
cx_zero: jmp foo
cx_nonzero:
*
* We use that same expansion, but we want to treat the entire
* three-instruction sequence as a single conditional branch.
* Thus we use a special instruction that stores the entire
* instruction sequence as mangled bytes, yet w/ a valid target operand
* (xref PR 251646).
* patch_branch and instr_invert_cbr
* know how to find the target pc (final 4 of 9 bytes).
* When decoding anything we've written we know the only jcxz or
* loop* instructions are part of these rewritten packages, and
* we use remangle_short_rewrite to read back in the instr.
* (have to do this everywhere call decode() except original
* interp, plus in input_trace())
*
* An alternative is to change 'jcxz foo' to:
<save eflags>
cmpb %cx,$0
je foo_restore
<restore eflags>
...
foo_restore: <restore eflags>
foo:
* However the added complications of restoring the eflags on
* the taken-branch path made me choose the former solution.
*/
/* SUMMARY:
* expand 'shortjump foo' to:
shortjump taken
jmp-short nottaken
taken: jmp foo
nottaken:
*/
if (ilist != NULL) {
/* PR 266292: for meta instrs, insert separate instrs */
/* reverse order */
opnd_t tgt = instr_get_target(instr);
instr_t *nottaken = INSTR_CREATE_label(dcontext);
instr_t *taken = INSTR_CREATE_jmp(dcontext, tgt);
ASSERT(instr_is_meta(instr));
instrlist_meta_postinsert(ilist, instr, nottaken);
instrlist_meta_postinsert(ilist, instr, taken);
instrlist_meta_postinsert(
ilist, instr,
INSTR_CREATE_jmp_short(dcontext, opnd_create_instr(nottaken)));
instr_set_target(instr, opnd_create_instr(taken));
return taken;
}
if (opnd_is_near_pc(instr_get_target(instr)))
target = opnd_get_pc(instr_get_target(instr));
else if (opnd_is_near_instr(instr_get_target(instr))) {
instr_t *tgt = opnd_get_instr(instr_get_target(instr));
/* XXX: not using get_app_instr_xl8() b/c drdecodelib doesn't link
* mangle_shared.c.
*/
target = instr_get_translation(tgt);
if (target == NULL && instr_raw_bits_valid(tgt))
target = instr_get_raw_bits(tgt);
ASSERT(target != NULL);
} else
ASSERT_NOT_REACHED();
/* PR 251646: cti_short_rewrite: target is in src0, so operands are
* valid, but raw bits must also be valid, since they hide the multiple
* instrs. For x64, it is marked for re-relativization, but it's
* special since the target must be obtained from src0 and not
* from the raw bits (since that might not reach).
*/
/* need 9 bytes + possible addr prefix */
mangled_sz = CTI_SHORT_REWRITE_LENGTH;
if (!reg_is_pointer_sized(opnd_get_reg(instr_get_src(instr, 1))))
mangled_sz++; /* need addr prefix */
instr_allocate_raw_bits(dcontext, instr, mangled_sz);
offs = 0;
if (mangled_sz > CTI_SHORT_REWRITE_LENGTH) {
instr_set_raw_byte(instr, offs, ADDR_PREFIX_OPCODE);
offs++;
}
/* first 2 bytes: jecxz 8-bit-offset */
instr_set_raw_byte(instr, offs, decode_first_opcode_byte(opcode));
offs++;
/* remember pc-relative offsets are from start of next instr */
instr_set_raw_byte(instr, offs, (byte)2);
offs++;
/* next 2 bytes: jmp-short 8-bit-offset */
instr_set_raw_byte(instr, offs, decode_first_opcode_byte(OP_jmp_short));
offs++;
instr_set_raw_byte(instr, offs, (byte)5);
offs++;
/* next 5 bytes: jmp 32-bit-offset */
instr_set_raw_byte(instr, offs, decode_first_opcode_byte(OP_jmp));
offs++;
/* for x64 we may not reach, but we go ahead and try */
instr_set_raw_word(instr, offs, (int)(target - (instr->bytes + mangled_sz)));
offs += sizeof(int);
ASSERT(offs == mangled_sz);
LOG(THREAD, LOG_INTERP, 2, "convert_to_near_rel: jecxz/loop* opcode\n");
/* original target operand is still valid */
instr_set_operands_valid(instr, true);
return instr;
}
LOG(THREAD, LOG_INTERP, 1, "convert_to_near_rel: unknown opcode: %d %s\n", opcode,
decode_opcode_name(opcode));
ASSERT_NOT_REACHED(); /* conversion not possible OR not a short-form cti */
return instr;
}
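/* Added illustrative note (not in the original file): for a rewritten jecxz the
 * CTI_SHORT_REWRITE_LENGTH (9-byte) raw-bit bundle built above is
 *   e3 02             jecxz +2   ; cx==0: hop over the jmp-short onto the near jmp
 *   eb 05             jmp   +5   ; cx!=0: skip the near jmp (fall-through path)
 *   e9 xx xx xx xx    jmp   <rel32 to the original target>
 * plus an optional 0x67 address-size prefix when the counter register operand is
 * not pointer-sized.
 */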
/* For jecxz and loop*, we create 3 instructions in a single
* instr that we treat like a single conditional branch.
* On re-decoding our own output we need to recreate that instr.
* This routine assumes that the instructions encoded at pc
* are indeed a mangled cti short.
* Assumes that the first instr has already been decoded into instr,
* that pc points to the start of that instr.
* Converts instr into a new 3-raw-byte-instr with a private copy of the
* original raw bits.
* Optionally modifies the target to "target" if "target" is non-null.
* Returns the pc of the instruction after the remangled sequence.
*/
byte *
remangle_short_rewrite(dcontext_t *dcontext, instr_t *instr, byte *pc, app_pc target)
{
uint mangled_sz = CTI_SHORT_REWRITE_LENGTH;
ASSERT(instr_is_cti_short_rewrite(instr, pc));
if (*pc == ADDR_PREFIX_OPCODE)
mangled_sz++;
/* first set the target in the actual operand src0 */
if (target == NULL) {
/* acquire existing absolute target */
int rel_target = *((int *)(pc + mangled_sz - 4));
target = pc + mangled_sz + rel_target;
}
instr_set_target(instr, opnd_create_pc(target));
/* now set up the bundle of raw instructions
* we've already read the first 2-byte instruction, jecxz/loop*
* they all take up mangled_sz bytes
*/
instr_allocate_raw_bits(dcontext, instr, mangled_sz);
instr_set_raw_bytes(instr, pc, mangled_sz);
/* for x64 we may not reach, but we go ahead and try */
instr_set_raw_word(instr, mangled_sz - 4, (int)(target - (pc + mangled_sz)));
/* now make operands valid */
instr_set_operands_valid(instr, true);
return (pc + mangled_sz);
}
/***************************************************************************/
#if !defined(STANDALONE_DECODER)
int
insert_out_of_line_context_switch(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, bool save, byte *encode_pc)
{
if (save) {
/* We adjust the stack so the return address will not be clobbered,
* so we can have call/return pair to take advantage of hardware
* call return stack for better performance.
* Xref emit_clean_call_save @ x86/emit_utils.c
* The precise adjustment amount is relied upon in
* find_next_fragment_from_gencode()'s handling of in_clean_call_save().
*/
PRE(ilist, instr,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(DR_REG_XSP),
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
-(int)(get_clean_call_switch_stack_size() +
get_clean_call_temp_stack_size()),
OPSZ_lea)));
}
/* We document to clients that we use r11 if we need an indirect call here. */
insert_reachable_cti(dcontext, ilist, instr, encode_pc,
save ? get_clean_call_save(dcontext _IF_X64(GENCODE_X64))
: get_clean_call_restore(dcontext _IF_X64(GENCODE_X64)),
false /*call*/, true /*returns*/, false /*!precise*/, DR_REG_R11,
NULL);
return get_clean_call_switch_stack_size();
}
void
insert_clear_eflags(dcontext_t *dcontext, clean_call_info_t *cci, instrlist_t *ilist,
instr_t *instr)
{
/* clear eflags for callee's usage */
if (cci == NULL || !cci->skip_clear_flags) {
if (dynamo_options.cleancall_ignore_eflags) {
/* we still clear DF since some compiler assumes
* DF is cleared at each function.
*/
PRE(ilist, instr, INSTR_CREATE_cld(dcontext));
} else {
/* on x64 a push immed is sign-extended to 64-bit */
PRE(ilist, instr, INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(0)));
PRE(ilist, instr, INSTR_CREATE_popf(dcontext));
}
}
}
/* Pushes not only the GPRs but also xmm/ymm, xip, and xflags, in
* priv_mcontext_t order.
* The current stack pointer alignment should be passed. Use 1 if
* unknown (NOT 0).
* Returns the amount of data pushed. Does NOT fix up the xsp value pushed
* to be the value prior to any pushes for x64 as no caller needs that
* currently (they all build a priv_mcontext_t and have to do further xsp
* fixups anyway).
* Includes xmm0-5 for PR 264138.
*/
uint
insert_push_all_registers(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *instr, uint alignment,
opnd_t push_pc, reg_id_t scratch /*optional*/)
{
uint dstack_offs = 0;
int offs_beyond_xmm = 0;
if (cci == NULL)
cci = &default_clean_call_info;
if (cci->preserve_mcontext || cci->num_simd_skip != NUM_SIMD_REGS) {
int offs = XMM_SLOTS_SIZE + PRE_XMM_PADDING;
if (cci->preserve_mcontext && cci->skip_save_flags) {
offs_beyond_xmm = 2 * XSP_SZ; /* pc and flags */
offs += offs_beyond_xmm;
}
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0, -offs)));
dstack_offs += offs;
}
if (preserve_xmm_caller_saved()) {
/* PR 264138: we must preserve xmm0-5 if on a 64-bit kernel */
int i;
/* PR 266305: see discussion in emit_fcache_enter_shared on
* which opcode is better. Note that the AMD optimization
* guide says to use movlps+movhps for unaligned stores, but
* for simplicity and smaller code I'm using movups anyway.
*/
/* XXX i#438: once have SandyBridge processor need to measure
* cost of vmovdqu and whether worth arranging 32-byte alignment
* for all callers. B/c we put ymm at end of priv_mcontext_t, we do
* currently have 32-byte alignment for clean calls.
*/
uint opcode = move_mm_reg_opcode(ALIGNED(alignment, 16), ALIGNED(alignment, 32));
ASSERT(proc_has_feature(FEATURE_SSE));
for (i = 0; i < NUM_SIMD_SAVED; i++) {
if (!cci->simd_skip[i]) {
PRE(ilist, instr,
instr_create_1dst_1src(
dcontext, opcode,
opnd_create_base_disp(REG_XSP, REG_NULL, 0,
PRE_XMM_PADDING + i * XMM_SAVED_REG_SIZE +
offs_beyond_xmm,
OPSZ_SAVED_XMM),
opnd_create_reg(REG_SAVED_XMM0 + (reg_id_t)i)));
}
}
ASSERT(i * XMM_SAVED_REG_SIZE == XMM_SAVED_SIZE);
ASSERT(XMM_SAVED_SIZE <= XMM_SLOTS_SIZE);
}
/* pc and aflags */
if (!cci->skip_save_flags) {
ASSERT(offs_beyond_xmm == 0);
if (opnd_is_immed_int(push_pc))
PRE(ilist, instr, INSTR_CREATE_push_imm(dcontext, push_pc));
else
PRE(ilist, instr, INSTR_CREATE_push(dcontext, push_pc));
dstack_offs += XSP_SZ;
PRE(ilist, instr, INSTR_CREATE_pushf(dcontext));
dstack_offs += XSP_SZ;
} else {
ASSERT(offs_beyond_xmm == 2 * XSP_SZ || !cci->preserve_mcontext);
/* for cci->preserve_mcontext we added to the lea above so we ignore push_pc */
}
# ifdef X64
/* keep priv_mcontext_t order */
if (!cci->reg_skip[REG_R15 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R15)));
if (!cci->reg_skip[REG_R14 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R14)));
if (!cci->reg_skip[REG_R13 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R13)));
if (!cci->reg_skip[REG_R12 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R12)));
if (!cci->reg_skip[REG_R11 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R11)));
if (!cci->reg_skip[REG_R10 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R10)));
if (!cci->reg_skip[REG_R9 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R9)));
if (!cci->reg_skip[REG_R8 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R8)));
if (!cci->reg_skip[REG_RAX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RAX)));
if (!cci->reg_skip[REG_RCX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RCX)));
if (!cci->reg_skip[REG_RDX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RDX)));
if (!cci->reg_skip[REG_RBX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RBX)));
/* we do NOT match pusha xsp value */
if (!cci->reg_skip[REG_RSP - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RSP)));
if (!cci->reg_skip[REG_RBP - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RBP)));
if (!cci->reg_skip[REG_RSI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RSI)));
if (!cci->reg_skip[REG_RDI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RDI)));
dstack_offs += (NUM_GP_REGS - cci->num_regs_skip) * XSP_SZ;
# else
PRE(ilist, instr, INSTR_CREATE_pusha(dcontext));
dstack_offs += 8 * XSP_SZ;
# endif
ASSERT(cci->skip_save_flags || cci->num_simd_skip != 0 || cci->num_regs_skip != 0 ||
dstack_offs == (uint)get_clean_call_switch_stack_size());
return dstack_offs;
}
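/* Added illustrative note (not in the original file): after the pushes above the
 * lowest slot on the dstack holds xdi (pushed last) and the slots grow upward
 * through the other GPRs, then xflags and the pushed pc, then the pre-xmm padding
 * and the xmm save area (when it is reserved), i.e. the same field order as
 * priv_mcontext_t described in the function header comment.
 */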
/* User should pass the alignment from insert_push_all_registers: i.e., the
* alignment at the end of all the popping, not the alignment prior to
* the popping.
*/
void
insert_pop_all_registers(dcontext_t *dcontext, clean_call_info_t *cci, instrlist_t *ilist,
instr_t *instr, uint alignment)
{
int offs_beyond_xmm = 0;
if (cci == NULL)
cci = &default_clean_call_info;
# ifdef X64
/* in priv_mcontext_t order */
if (!cci->reg_skip[REG_RDI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RDI)));
if (!cci->reg_skip[REG_RSI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RSI)));
if (!cci->reg_skip[REG_RBP - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RBP)));
/* skip xsp by popping into dead rbx */
if (!cci->reg_skip[REG_RSP - REG_XAX]) {
ASSERT(!cci->reg_skip[REG_RBX - REG_XAX]);
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RBX)));
}
if (!cci->reg_skip[REG_RBX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RBX)));
if (!cci->reg_skip[REG_RDX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RDX)));
if (!cci->reg_skip[REG_RCX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RCX)));
if (!cci->reg_skip[REG_RAX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RAX)));
if (!cci->reg_skip[REG_R8 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R8)));
if (!cci->reg_skip[REG_R9 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R9)));
if (!cci->reg_skip[REG_R10 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R10)));
if (!cci->reg_skip[REG_R11 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R11)));
if (!cci->reg_skip[REG_R12 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R12)));
if (!cci->reg_skip[REG_R13 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R13)));
if (!cci->reg_skip[REG_R14 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R14)));
if (!cci->reg_skip[REG_R15 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R15)));
# else
PRE(ilist, instr, INSTR_CREATE_popa(dcontext));
# endif
if (!cci->skip_save_flags) {
PRE(ilist, instr, INSTR_CREATE_popf(dcontext));
        offs_beyond_xmm = XSP_SZ; /* pc */
} else if (cci->preserve_mcontext) {
offs_beyond_xmm = 2 * XSP_SZ; /* aflags + pc */
}
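    /* offs_beyond_xmm counts the slot(s) still on the stack between the current
     * TOS and the xmm save area (the pc slot, plus the unpopped aflags slot in
     * the preserve_mcontext-without-popf case); the xmm loads and the final lea
     * below must skip over them.
     */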
if (preserve_xmm_caller_saved()) {
/* PR 264138: we must preserve xmm0-5 if on a 64-bit kernel */
int i;
/* See discussion in emit_fcache_enter_shared on which opcode
* is better. */
uint opcode = move_mm_reg_opcode(ALIGNED(alignment, 32), ALIGNED(alignment, 16));
ASSERT(proc_has_feature(FEATURE_SSE));
for (i = 0; i < NUM_SIMD_SAVED; i++) {
if (!cci->simd_skip[i]) {
PRE(ilist, instr,
instr_create_1dst_1src(
dcontext, opcode, opnd_create_reg(REG_SAVED_XMM0 + (reg_id_t)i),
opnd_create_base_disp(REG_XSP, REG_NULL, 0,
PRE_XMM_PADDING + i * XMM_SAVED_REG_SIZE +
offs_beyond_xmm,
OPSZ_SAVED_XMM)));
}
}
ASSERT(i * XMM_SAVED_REG_SIZE == XMM_SAVED_SIZE);
ASSERT(XMM_SAVED_SIZE <= XMM_SLOTS_SIZE);
}
PRE(ilist, instr,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0,
PRE_XMM_PADDING + XMM_SLOTS_SIZE + offs_beyond_xmm)));
}
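/* Narrows a parameter register to its 32-bit form for 4-byte args on x64
 * (e.g., RDI -> EDI) so that a plain 32-bit mov zero-extends, except for 32-bit
 * immediates with the top bit set, which keep the full register so they are
 * sign-extended (see PR 250976 #2 below).
 */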
reg_id_t
shrink_reg_for_param(reg_id_t regular, opnd_t arg)
{
# ifdef X64
if (opnd_get_size(arg) == OPSZ_4) { /* we ignore var-sized */
/* PR 250976 #2: leave 64-bit only if an immed w/ top bit set (we
* assume user wants sign-extension; that is after all what happens
* on a push of a 32-bit immed) */
if (!opnd_is_immed_int(arg) || (opnd_get_immed_int(arg) & 0x80000000) == 0)
return reg_64_to_32(regular);
}
# endif
return regular;
}
/* Returns the change in the stack pointer.
* N.B.: due to stack alignment and minimum stack reservation, do
 * not use parameters involving esp/rsp, as their values can change!
*
* This routine only supports passing arguments that are integers or
* pointers of a size equal or smaller than the register size: i.e., no
* floating-point, multimedia, or aggregate data types.
*
* For 64-bit mode, if a 32-bit immediate integer is specified as an
* argument and it has its top bit set, we assume it is intended to be
* sign-extended to 64-bits; otherwise we zero-extend it.
*
* For 64-bit mode, variable-sized argument operands may not work
* properly.
*
* Arguments that reference REG_XSP will work for clean calls, but are not guaranteed
* to work for non-clean, especially for 64-bit where we align, etc. Arguments that
* reference sub-register portions of REG_XSP are not supported.
*
* XXX PR 307874: w/ a post optimization pass, or perhaps more clever use of
* existing passes, we could do much better on calling convention and xsp conflicting
* args. We should also really consider inlining client callees (PR 218907), since
* clean calls for 64-bit are enormous (71 instrs/264 bytes for 2-arg x64; 26
* instrs/99 bytes for x86) and we could avoid all the xmm saves and replace pushf w/
* lahf.
*/
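/* As a rough illustration (not the literal emitted sequence), a clean call with
 * two small immediate args on x64 SysV boils down to something like:
 *     lea  rsp, [rsp - total_stack]    ; only if total_stack > 0
 *     mov  esi, <arg1>                 ; args are processed in reverse order
 *     mov  edi, <arg0>
 * with stores to xsp offsets replacing the movs for args beyond NUM_REGPARM.
 */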
uint
insert_parameter_preparation(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
bool clean_call, uint num_args, opnd_t *args)
{
uint i;
int r;
uint preparm_padding = 0;
uint param_stack = 0, total_stack = 0;
bool push = true;
bool restore_xax = false;
bool restore_xsp = false;
/* we need two passes for PR 250976 optimization */
/* Push/mov in reverse order. We need a label so we can also add
* instrs prior to the regular param prep. So params are POST-mark, while
* pre-param-prep is POST-prev or PRE-mark.
*/
# ifdef X64
uint arg_pre_push = 0, total_pre_push = 0;
# endif
instr_t *prev = (instr == NULL) ? instrlist_last(ilist) : instr_get_prev(instr);
instr_t *mark = INSTR_CREATE_label(dcontext);
PRE(ilist, instr, mark);
/* For a clean call, xax is dead (clobbered by prepare_for_clean_call()).
* Rather than use as scratch and restore prior to each param that uses it,
* we restore once up front if any use it, and use regparms[0] as scratch,
* which is symmetric with non-clean-calls: regparms[0] is dead since we're
* doing args in reverse order. However, we then can't use regparms[0]
* directly if referenced in earlier params, but similarly for xax, so
* there's no clear better way. (prepare_for_clean_call also clobbers xsp,
* but we just disallow args that use it).
*/
ASSERT(num_args == 0 || args != NULL);
/* We can get away w/ one pass, except for PR 250976 we want calling conv
* regs to be able to refer to priv_mcontext_t as well as potentially being
* pushed: but we need to know the total # pushes ahead of time (since hard
* to mark for post-patching)
*/
for (i = 0; i < num_args; i++) {
IF_X64(bool is_pre_push = false;)
for (r = 0; r < opnd_num_regs_used(args[i]); r++) {
reg_id_t used = opnd_get_reg_used(args[i], r);
IF_X64(int parm;)
LOG(THREAD, LOG_INTERP, 4, "ipp: considering arg %d reg %d == %s\n", i, r,
reg_names[used]);
if (clean_call && !restore_xax && reg_overlap(used, REG_XAX))
restore_xax = true;
if (reg_overlap(used, REG_XSP)) {
IF_X64(CLIENT_ASSERT(clean_call,
"Non-clean-call argument: REG_XSP not supported"));
CLIENT_ASSERT(used == REG_XSP,
"Call argument: sub-reg-xsp not supported");
if (clean_call && /*x64*/ parameters_stack_padded() && !restore_xsp)
restore_xsp = true;
}
# ifdef X64
/* PR 250976 #A: count the number of pre-pushes we need */
parm = reg_parameter_num(used);
/* We can read a register used in an earlier arg since we store that
* arg later (we do reverse order), except arg0, which we use as
* scratch (we don't always need it, but not worth another pre-pass
* through all args to find out), and xsp. Otherwise, if a plain reg,
* we point at mcontext (we restore xsp slot in mcontext if nec.).
* If a mem ref, we need to pre-push onto stack.
* N.B.: this conditional is duplicated in 2nd loop.
*/
if (!is_pre_push &&
((parm == 0 && num_args > 1) || parm > (int)i ||
reg_overlap(used, REG_XSP)) &&
(!clean_call || !opnd_is_reg(args[i]))) {
total_pre_push++;
is_pre_push = true; /* ignore further regs in same arg */
}
# endif
}
}
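    /* At this point (x64) total_pre_push counts the args that must be
     * materialized in stack temporaries: those reading regparms[0] (when there
     * is more than one arg), a param reg belonging to a later arg (already
     * written by the time this arg is stored, since we store in reverse order),
     * or xsp -- except plain-register args in a clean call, which can instead
     * be read back from the priv_mcontext_t.
     */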
if (parameters_stack_padded()) {
/* For x64, supposed to reserve rsp space in function prologue; we
* do next best thing and reserve it prior to setting up the args.
*/
push = false; /* store args to xsp offsets instead of pushing them */
total_stack = REGPARM_MINSTACK;
if (num_args > NUM_REGPARM)
total_stack += XSP_SZ * (num_args - NUM_REGPARM);
param_stack = total_stack;
IF_X64(total_stack += XSP_SZ * total_pre_push);
/* We assume rsp is currently 16-byte aligned. End of arguments is supposed
* to be 16-byte aligned for x64 SysV (note that retaddr will then make
* rsp 8-byte-aligned, which is ok: callee has to rectify that).
* For clean calls, prepare_for_clean_call leaves rsp aligned for x64.
* XXX PR 218790: we require users of dr_insert_call to ensure
* alignment; should we put in support to dynamically align?
*/
preparm_padding =
ALIGN_FORWARD_UINT(total_stack, REGPARM_END_ALIGN) - total_stack;
total_stack += preparm_padding;
/* we have to wait to insert the xsp adjust */
} else {
ASSERT(NUM_REGPARM == 0);
ASSERT(push);
IF_X64(ASSERT(total_pre_push == 0));
total_stack = XSP_SZ * num_args;
}
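    /* For illustration: if NUM_REGPARM were 4 with a 32-byte REGPARM_MINSTACK
     * (the usual Windows x64 shadow area), a 6-arg call would reserve
     * 32 + 2*8 = 48 bytes here, before adding any x64 pre-push slots and the
     * padding up to REGPARM_END_ALIGN.
     */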
LOG(THREAD, LOG_INTERP, 3,
"insert_parameter_preparation: %d args, %d in-reg, %d pre-push, %d/%d stack\n",
num_args, NUM_REGPARM, IF_X64_ELSE(total_pre_push, 0), param_stack, total_stack);
for (i = 0; i < num_args; i++) {
/* FIXME PR 302951: we need to handle state restoration if any
* of these args references app memory. We should pull the state from
* the priv_mcontext_t on the stack if in a clean call. FIXME: what if not?
*/
opnd_t arg = args[i];
CLIENT_ASSERT(opnd_get_size(arg) == OPSZ_PTR ||
opnd_is_immed_int(arg) IF_X64(|| opnd_get_size(arg) == OPSZ_4),
"Clean call arg has unsupported size");
# ifdef X64
/* PR 250976 #A: support args that reference param regs */
for (r = 0; r < opnd_num_regs_used(arg); r++) {
reg_id_t used = opnd_get_reg_used(arg, r);
int parm = reg_parameter_num(used);
/* See comments in loop above */
if ((parm == 0 && num_args > 1) || parm > (int)i ||
reg_overlap(used, REG_XSP)) {
int disp = 0;
if (clean_call && opnd_is_reg(arg)) {
/* We can point at the priv_mcontext_t slot.
* priv_mcontext_t is at the base of dstack: compute offset
* from xsp to the field we want and replace arg.
*/
disp += opnd_get_reg_dcontext_offs(opnd_get_reg(arg));
/* skip rest of what prepare_for_clean_call adds */
disp += clean_call_beyond_mcontext();
/* skip what this routine added */
disp += total_stack;
} else {
/* Push a temp on the stack and point at it. We
* could try to optimize by juggling registers, but
* not worth it.
*/
/* xsp was adjusted up above; we simply store to xsp offsets */
disp = param_stack + XSP_SZ * arg_pre_push;
if (opnd_is_reg(arg) && opnd_get_size(arg) == OPSZ_PTR) {
POST(ilist, prev,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, disp), arg));
} else {
reg_id_t xsp_scratch = regparms[0];
/* don't want to just change size since will read extra bytes.
* can't do mem-to-mem so go through scratch reg */
if (reg_overlap(used, REG_XSP)) {
/* Get original xsp into scratch[0] and replace in arg */
if (opnd_uses_reg(arg, regparms[0])) {
xsp_scratch = REG_XAX;
ASSERT(!opnd_uses_reg(arg, REG_XAX)); /* can't use 3 */
/* FIXME: rather than putting xsp into mcontext
* slot, better to just do local get from dcontext
* like we do for 32-bit below? */
POST(ilist, prev,
instr_create_restore_from_tls(dcontext, REG_XAX,
TLS_XAX_SLOT));
}
opnd_replace_reg(&arg, REG_XSP, xsp_scratch);
}
POST(ilist, prev,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, disp),
opnd_create_reg(regparms[0])));
/* If sub-ptr-size, zero-extend is what we want so no movsxd */
POST(ilist, prev,
INSTR_CREATE_mov_ld(
dcontext,
opnd_create_reg(shrink_reg_for_param(regparms[0], arg)),
arg));
if (reg_overlap(used, REG_XSP)) {
int xsp_disp = opnd_get_reg_dcontext_offs(REG_XSP) +
clean_call_beyond_mcontext() + total_stack;
POST(ilist, prev,
INSTR_CREATE_mov_ld(
dcontext, opnd_create_reg(xsp_scratch),
OPND_CREATE_MEMPTR(REG_XSP, xsp_disp)));
if (xsp_scratch == REG_XAX) {
POST(ilist, prev,
instr_create_save_to_tls(dcontext, REG_XAX,
TLS_XAX_SLOT));
}
}
if (opnd_uses_reg(arg, regparms[0])) {
/* must restore since earlier arg might have clobbered */
int mc_disp = opnd_get_reg_dcontext_offs(regparms[0]) +
clean_call_beyond_mcontext() + total_stack;
POST(ilist, prev,
INSTR_CREATE_mov_ld(
dcontext, opnd_create_reg(regparms[0]),
OPND_CREATE_MEMPTR(REG_XSP, mc_disp)));
}
}
arg_pre_push++; /* running counter */
}
arg =
opnd_create_base_disp(REG_XSP, REG_NULL, 0, disp, opnd_get_size(arg));
                break; /* once we've handled arg ignore further reg refs */
}
}
# endif
if (i < NUM_REGPARM) {
reg_id_t regparm = shrink_reg_for_param(regparms[i], arg);
if (opnd_is_immed_int(arg) || opnd_is_instr(arg)) {
POST(ilist, mark,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(regparm), arg));
} else {
POST(ilist, mark,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(regparm), arg));
}
} else {
if (push) {
IF_X64(ASSERT_NOT_REACHED()); /* no 64-bit push_imm! */
if (opnd_is_immed_int(arg) || opnd_is_instr(arg))
POST(ilist, mark, INSTR_CREATE_push_imm(dcontext, arg));
else {
if (clean_call && opnd_uses_reg(arg, REG_XSP)) {
/* We do a purely local expansion:
* spill eax, mc->eax, esp->eax, arg->eax, push eax, restore eax
*/
reg_id_t scratch = REG_XAX;
if (opnd_uses_reg(arg, scratch)) {
scratch = REG_XCX;
ASSERT(!opnd_uses_reg(arg, scratch)); /* can't use 3 regs */
}
opnd_replace_reg(&arg, REG_XSP, scratch);
POST(ilist, mark,
instr_create_restore_from_tls(dcontext, scratch,
TLS_XAX_SLOT));
POST(ilist, mark, INSTR_CREATE_push(dcontext, arg));
POST(ilist, mark,
instr_create_restore_from_dc_via_reg(dcontext, scratch,
scratch, XSP_OFFSET));
insert_get_mcontext_base(dcontext, ilist, instr_get_next(mark),
scratch);
POST(ilist, mark,
instr_create_save_to_tls(dcontext, scratch, TLS_XAX_SLOT));
} else
POST(ilist, mark, INSTR_CREATE_push(dcontext, arg));
}
} else {
/* xsp was adjusted up above; we simply store to xsp offsets */
uint offs = REGPARM_MINSTACK + XSP_SZ * (i - NUM_REGPARM);
# ifdef X64
if (opnd_is_immed_int(arg) || opnd_is_instr(arg)) {
/* PR 250976 #3: there is no memory store of 64-bit-immediate,
* so go through scratch reg */
ASSERT(NUM_REGPARM > 0);
POST(ilist, mark,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEMPTR(REG_XSP, offs),
opnd_create_reg(regparms[0])));
POST(ilist, mark,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(regparms[0]),
arg));
} else {
# endif
if (opnd_is_memory_reference(arg)) {
/* can't do mem-to-mem so go through scratch */
reg_id_t scratch;
if (NUM_REGPARM > 0)
scratch = regparms[0];
else {
/* This happens on Mac.
* FIXME i#1370: not safe if later arg uses xax:
* local spill? Review how regparms[0] is preserved.
*/
scratch = REG_XAX;
}
POST(ilist, mark,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, offs),
opnd_create_reg(scratch)));
POST(ilist, mark,
INSTR_CREATE_mov_ld(
dcontext,
opnd_create_reg(shrink_reg_for_param(scratch, arg)),
arg));
} else {
POST(ilist, mark,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, offs), arg));
}
# ifdef X64
}
# endif
}
}
}
if (!push && total_stack > 0) {
POST(ilist, prev, /* before everything else: pre-push and args */
/* can we use sub? may as well preserve eflags */
INSTR_CREATE_lea(
dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0, -(int)total_stack)));
}
if (restore_xsp) {
/* before restore_xax, since we're going to clobber xax */
int disp = opnd_get_reg_dcontext_offs(REG_XSP);
instr_t *where = instr_get_next(prev);
/* skip rest of what prepare_for_clean_call adds */
disp += clean_call_beyond_mcontext();
insert_get_mcontext_base(dcontext, ilist, where, REG_XAX);
PRE(ilist, where,
instr_create_restore_from_dc_via_reg(dcontext, REG_XAX, REG_XAX, XSP_OFFSET));
PRE(ilist, where,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEMPTR(REG_XSP, disp),
opnd_create_reg(REG_XAX)));
/* now we need restore_xax to be AFTER this */
prev = instr_get_prev(where);
}
if (restore_xax) {
int disp = opnd_get_reg_dcontext_offs(REG_XAX);
/* skip rest of what prepare_for_clean_call adds */
disp += clean_call_beyond_mcontext();
POST(ilist, prev, /* before everything else: pre-push, args, and stack adjust */
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XAX),
OPND_CREATE_MEMPTR(REG_XSP, disp)));
}
return total_stack;
}
/* If jmp_instr == NULL, uses jmp_tag, otherwise uses jmp_instr
*/
void
insert_clean_call_with_arg_jmp_if_ret_true(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, void *callee, int arg,
app_pc jmp_tag, instr_t *jmp_instr)
{
instr_t *false_popa, *jcc;
byte *encode_pc = vmcode_get_start();
prepare_for_clean_call(dcontext, NULL, ilist, instr, encode_pc);
dr_insert_call(dcontext, ilist, instr, callee, 1, OPND_CREATE_INT32(arg));
/* if the return value (xax) is 0, then jmp to internal false path */
PRE(ilist, instr, /* can't cmp w/ 64-bit immed so use test (shorter anyway) */
INSTR_CREATE_test(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_XAX)));
/* fill in jcc target once have false path */
jcc = INSTR_CREATE_jcc(dcontext, OP_jz, opnd_create_pc(NULL));
PRE(ilist, instr, jcc);
/* if it falls through, then it's true, so restore and jmp to true tag
* passed in by caller
*/
cleanup_after_clean_call(dcontext, NULL, ilist, instr, encode_pc);
if (jmp_instr == NULL) {
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr,
INSTR_CREATE_jmp(dcontext, opnd_create_pc(jmp_tag)));
} else {
PRE(ilist, instr, INSTR_CREATE_jmp(dcontext, opnd_create_instr(jmp_instr)));
}
/* otherwise (if returned false), just do standard popf and continue */
/* get 1st instr of cleanup path */
false_popa = instr_get_prev(instr);
cleanup_after_clean_call(dcontext, NULL, ilist, instr, encode_pc);
false_popa = instr_get_next(false_popa);
instr_set_target(jcc, opnd_create_instr(false_popa));
}
/* If !precise, encode_pc is treated as +- a page (meant for clients
* writing an instrlist to gencode so not sure of exact placement but
* within a page).
* If encode_pc == vmcode_get_start(), checks reachability of whole
* vmcode region (meant for code going somewhere not precisely known
* in the code cache).
* Returns whether ended up using a direct cti. If inlined_tgt_instr != NULL,
* and an inlined target was used, returns a pointer to that instruction
* in *inlined_tgt_instr.
*/
bool
insert_reachable_cti(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
byte *encode_pc, byte *target, bool jmp, bool returns, bool precise,
reg_id_t scratch, instr_t **inlined_tgt_instr)
{
byte *encode_start;
byte *encode_end;
if (precise) {
encode_start = target + JMP_LONG_LENGTH;
encode_end = encode_start;
} else if (encode_pc == vmcode_get_start()) {
/* consider whole vmcode region */
encode_start = encode_pc;
encode_end = vmcode_get_end();
} else {
encode_start = (byte *)PAGE_START(encode_pc - PAGE_SIZE);
encode_end = (byte *)ALIGN_FORWARD(encode_pc + PAGE_SIZE, PAGE_SIZE);
}
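    /* The rel32 form is only used when the target is reachable from both ends
     * of the possible encode range, so the cti stays valid wherever within
     * that range it is actually emitted.
     */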
if (REL32_REACHABLE(encode_start, target) && REL32_REACHABLE(encode_end, target)) {
/* For precise, we could consider a short cti, but so far no
* users are precise so we'll leave that for i#56.
*/
if (jmp)
PRE(ilist, where, INSTR_CREATE_jmp(dcontext, opnd_create_pc(target)));
else
PRE(ilist, where, INSTR_CREATE_call(dcontext, opnd_create_pc(target)));
return true;
} else {
opnd_t ind_tgt;
instr_t *inlined_tgt = NULL;
if (scratch == DR_REG_NULL) {
/* indirect through an inlined target */
inlined_tgt = instr_build_bits(dcontext, OP_UNDECODED, sizeof(target));
/* XXX: could use mov imm->xax and have target skip rex+opcode
* for clean disassembly
*/
instr_set_raw_bytes(inlined_tgt, (byte *)&target, sizeof(target));
/* this will copy the bytes for us, so we don't have to worry about
* the lifetime of the target param
*/
instr_allocate_raw_bits(dcontext, inlined_tgt, sizeof(target));
ind_tgt = opnd_create_mem_instr(inlined_tgt, 0, OPSZ_PTR);
if (inlined_tgt_instr != NULL)
*inlined_tgt_instr = inlined_tgt;
} else {
PRE(ilist, where,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(scratch),
OPND_CREATE_INTPTR(target)));
ind_tgt = opnd_create_reg(scratch);
if (inlined_tgt_instr != NULL)
*inlined_tgt_instr = NULL;
}
if (jmp)
PRE(ilist, where, INSTR_CREATE_jmp_ind(dcontext, ind_tgt));
else
PRE(ilist, where, INSTR_CREATE_call_ind(dcontext, ind_tgt));
if (inlined_tgt != NULL)
PRE(ilist, where, inlined_tgt);
return false;
}
}
/*###########################################################################
*###########################################################################
*
* M A N G L I N G R O U T I N E S
*/
#endif /* !STANDALONE_DECODER */
/* We export these mov/push utilities to drdecode */
/* If src_inst != NULL, uses it (and assumes it will be encoded at
* encode_estimate to determine whether > 32 bits or not: so if unsure where
* it will be encoded, pass a high address) as the immediate; else
* uses val.
*/
void
insert_mov_immed_arch(dcontext_t *dcontext, instr_t *src_inst, byte *encode_estimate,
ptr_int_t val, opnd_t dst, instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
instr_t *mov1, *mov2;
if (src_inst != NULL)
val = (ptr_int_t)encode_estimate;
#ifdef X64
if (X64_MODE_DC(dcontext) && !opnd_is_reg(dst)) {
if (val <= INT_MAX && val >= INT_MIN) {
/* mov is sign-extended, so we can use one move if it is all
* 0 or 1 in top 33 bits
*/
mov1 = INSTR_CREATE_mov_imm(dcontext, dst,
(src_inst == NULL)
? OPND_CREATE_INT32((int)val)
: opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, mov1);
mov2 = NULL;
} else {
/* do mov-64-bit-immed in two pieces. tiny corner-case risk of racy
* access to [dst] if this thread is suspended in between or another
* thread is trying to read [dst], but o/w we have to spill and
* restore a register.
*/
CLIENT_ASSERT(opnd_is_memory_reference(dst), "invalid dst opnd");
/* mov low32 => [mem32] */
opnd_set_size(&dst, OPSZ_4);
mov1 = INSTR_CREATE_mov_st(dcontext, dst,
(src_inst == NULL)
? OPND_CREATE_INT32((int)val)
: opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, mov1);
/* mov high32 => [mem32+4] */
if (opnd_is_base_disp(dst)) {
int disp = opnd_get_disp(dst);
CLIENT_ASSERT(disp + 4 > disp, "disp overflow");
opnd_set_disp(&dst, disp + 4);
} else {
byte *addr = opnd_get_addr(dst);
CLIENT_ASSERT(!POINTER_OVERFLOW_ON_ADD(addr, 4), "addr overflow");
dst = OPND_CREATE_ABSMEM(addr + 4, OPSZ_4);
}
mov2 = INSTR_CREATE_mov_st(dcontext, dst,
(src_inst == NULL)
? OPND_CREATE_INT32((int)(val >> 32))
: opnd_create_instr_ex(src_inst, OPSZ_4, 32));
PRE(ilist, instr, mov2);
}
} else {
#endif
mov1 = INSTR_CREATE_mov_imm(dcontext, dst,
(src_inst == NULL)
? OPND_CREATE_INTPTR(val)
: opnd_create_instr_ex(src_inst, OPSZ_PTR, 0));
PRE(ilist, instr, mov1);
mov2 = NULL;
#ifdef X64
}
#endif
if (first != NULL)
*first = mov1;
if (last != NULL)
*last = mov2;
}
/* If src_inst != NULL, uses it (and assumes it will be encoded at
* encode_estimate to determine whether > 32 bits or not: so if unsure where
* it will be encoded, pass a high address) as the immediate; else
* uses val.
*/
void
insert_push_immed_arch(dcontext_t *dcontext, instr_t *src_inst, byte *encode_estimate,
ptr_int_t val, instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
instr_t *push, *mov;
if (src_inst != NULL)
val = (ptr_int_t)encode_estimate;
#ifdef X64
if (X64_MODE_DC(dcontext)) {
/* do push-64-bit-immed in two pieces. tiny corner-case risk of racy
* access to TOS if this thread is suspended in between or another
* thread is trying to read its stack, but o/w we have to spill and
* restore a register.
*/
push = INSTR_CREATE_push_imm(dcontext,
(src_inst == NULL)
? OPND_CREATE_INT32((int)val)
: opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, push);
/* push is sign-extended, so we can skip top half if it is all 0 or 1
* in top 33 bits
*/
if (val <= INT_MAX && val >= INT_MIN) {
mov = NULL;
} else {
mov = INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, 4),
(src_inst == NULL)
? OPND_CREATE_INT32((int)(val >> 32))
: opnd_create_instr_ex(src_inst, OPSZ_4, 32));
PRE(ilist, instr, mov);
}
} else {
#endif
push = INSTR_CREATE_push_imm(dcontext,
(src_inst == NULL)
? OPND_CREATE_INT32(val)
: opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, push);
mov = NULL;
#ifdef X64
}
#endif
if (first != NULL)
*first = push;
if (last != NULL)
*last = mov;
}
#ifndef STANDALONE_DECODER /* back for rest of file */
/* Far calls and rets have double total size */
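/* E.g., an OPSZ_8 far call pushes a 4-byte return address plus a 4-byte
 * (expanded) selector, so the per-entry size returned here is OPSZ_4.
 */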
static opnd_size_t
stack_entry_size(instr_t *instr, opnd_size_t opsize)
{
if (instr_get_opcode(instr) == OP_call_far ||
instr_get_opcode(instr) == OP_call_far_ind ||
instr_get_opcode(instr) == OP_ret_far) {
/* cut OPSZ_8_rex16_short4 in half */
if (opsize == OPSZ_4)
return OPSZ_2;
else if (opsize == OPSZ_8)
return OPSZ_4;
else {
# ifdef X64
ASSERT(opsize == OPSZ_16);
return OPSZ_8;
# else
ASSERT_NOT_REACHED();
# endif
}
} else if (instr_get_opcode(instr) == OP_iret) {
/* convert OPSZ_12_rex40_short6 */
if (opsize == OPSZ_6)
return OPSZ_2;
else if (opsize == OPSZ_12)
return OPSZ_4;
else {
# ifdef X64
ASSERT(opsize == OPSZ_40);
return OPSZ_8;
# else
ASSERT_NOT_REACHED();
# endif
}
}
return opsize;
}
/* Used for fault translation */
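/* Returns whether inst matches one of the xsp-mangling patterns below,
 * accumulating the app's xsp displacement into *xsp_adjust when it does;
 * returns false for instructions not recognized as part of our xsp mangling.
 */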
bool
instr_check_xsp_mangling(dcontext_t *dcontext, instr_t *inst, int *xsp_adjust)
{
ASSERT(xsp_adjust != NULL);
if (instr_get_opcode(inst) == OP_push || instr_get_opcode(inst) == OP_push_imm) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: push or push_imm\n");
*xsp_adjust -= opnd_size_in_bytes(opnd_get_size(instr_get_dst(inst, 1)));
} else if (instr_get_opcode(inst) == OP_pop) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: pop\n");
*xsp_adjust += opnd_size_in_bytes(opnd_get_size(instr_get_src(inst, 1)));
}
/* 1st part of push emulation from insert_push_retaddr */
else if (instr_get_opcode(inst) == OP_lea &&
opnd_get_reg(instr_get_dst(inst, 0)) == REG_XSP &&
opnd_get_base(instr_get_src(inst, 0)) == REG_XSP &&
opnd_get_index(instr_get_src(inst, 0)) == REG_NULL) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: lea xsp adjust\n");
*xsp_adjust += opnd_get_disp(instr_get_src(inst, 0));
}
/* 2nd part of push emulation from insert_push_retaddr */
else if (instr_get_opcode(inst) == OP_mov_st &&
opnd_is_base_disp(instr_get_dst(inst, 0)) &&
opnd_get_base(instr_get_dst(inst, 0)) == REG_XSP &&
opnd_get_index(instr_get_dst(inst, 0)) == REG_NULL) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: store to stack\n");
/* nothing to track: paired lea is what we undo */
}
/* retrieval of target for call* or jmp* */
else if ((instr_get_opcode(inst) == OP_movzx &&
reg_overlap(opnd_get_reg(instr_get_dst(inst, 0)), REG_XCX)) ||
(instr_get_opcode(inst) == OP_mov_ld &&
reg_overlap(opnd_get_reg(instr_get_dst(inst, 0)), REG_XCX))) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: ib tgt to *cx\n");
/* nothing: our xcx spill restore will undo */
}
/* part of pop emulation for iretd/lretd in x64 mode */
else if (instr_get_opcode(inst) == OP_mov_ld &&
opnd_is_base_disp(instr_get_src(inst, 0)) &&
opnd_get_base(instr_get_src(inst, 0)) == REG_XSP &&
opnd_get_index(instr_get_src(inst, 0)) == REG_NULL) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: load from stack\n");
/* nothing to track: paired lea is what we undo */
}
/* part of data16 ret. once we have cs preservation (PR 271317) we'll
* need to not fail when walking over a movzx to a pop cs (right now we
* do not read the stack for the pop cs).
*/
else if (instr_get_opcode(inst) == OP_movzx &&
opnd_get_reg(instr_get_dst(inst, 0)) == REG_CX) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: movzx to cx\n");
/* nothing: our xcx spill restore will undo */
}
/* fake pop of cs for iret */
else if (instr_get_opcode(inst) == OP_add && opnd_is_reg(instr_get_dst(inst, 0)) &&
opnd_get_reg(instr_get_dst(inst, 0)) == REG_XSP &&
opnd_is_immed_int(instr_get_src(inst, 0))) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: add to xsp\n");
ASSERT(CHECK_TRUNCATE_TYPE_int(opnd_get_immed_int(instr_get_src(inst, 0))));
*xsp_adjust += (int)opnd_get_immed_int(instr_get_src(inst, 0));
}
/* popf for iret */
else if (instr_get_opcode(inst) == OP_popf) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: popf\n");
*xsp_adjust += opnd_size_in_bytes(opnd_get_size(instr_get_src(inst, 1)));
} else {
return false;
}
return true;
}
/* N.B.: keep in synch with instr_check_xsp_mangling() */
void
insert_push_retaddr(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
ptr_int_t retaddr, opnd_size_t opsize)
{
if (opsize == OPSZ_2) {
ptr_int_t val = retaddr & (ptr_int_t)0x0000ffff;
/* can't do a non-default operand size with a push immed so we emulate */
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -2, OPSZ_lea)));
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM16(REG_XSP, 2),
OPND_CREATE_INT16(val)));
} else if (opsize ==
OPSZ_PTR IF_X64(|| (!X64_CACHE_MODE_DC(dcontext) && opsize == OPSZ_4))) {
insert_push_immed_ptrsz(dcontext, retaddr, ilist, instr, NULL, NULL);
} else {
# ifdef X64
ptr_int_t val = retaddr & (ptr_int_t)0xffffffff;
ASSERT(opsize == OPSZ_4);
/* can't do a non-default operand size with a push immed so we emulate */
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -4, OPSZ_lea)));
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, 0),
OPND_CREATE_INT32((int)val)));
# else
ASSERT_NOT_REACHED();
# endif
}
}
# ifdef CLIENT_INTERFACE
/* N.B.: keep in synch with instr_check_xsp_mangling() */
static void
insert_mov_ptr_uint_beyond_TOS(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
ptr_int_t value, opnd_size_t opsize)
{
/* we insert non-meta b/c we want faults to go to app (should only fault
* if the ret itself faulted, barring races) for simplicity: o/w our
     * our-mangling sequence gets broken up and becomes more complex.
*/
if (opsize == OPSZ_2) {
ptr_int_t val = value & (ptr_int_t)0x0000ffff;
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM16(REG_XSP, -2),
OPND_CREATE_INT16(val)));
} else if (opsize == OPSZ_4) {
ptr_int_t val = value & (ptr_int_t)0xffffffff;
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, -4),
OPND_CREATE_INT32(val)));
} else {
# ifdef X64
ptr_int_t val_low = value & (ptr_int_t)0xffffffff;
ASSERT(opsize == OPSZ_8);
if (CHECK_TRUNCATE_TYPE_int(value)) {
/* prefer a single write w/ sign-extension */
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM64(REG_XSP, -8),
OPND_CREATE_INT32(val_low)));
} else {
/* we need two 32-bit writes */
ptr_int_t val_high = (value >> 32);
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, -8),
OPND_CREATE_INT32(val_low)));
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, -4),
OPND_CREATE_INT32(val_high)));
}
# else
ASSERT_NOT_REACHED();
# endif
}
}
# endif /* CLIENT_INTERFACE */
static void
insert_push_cs(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
ptr_int_t retaddr, opnd_size_t opsize)
{
# ifdef X64
if (X64_CACHE_MODE_DC(dcontext)) {
/* "push cs" is invalid; for now we push the typical cs values.
* i#823 covers doing this more generally.
*/
insert_push_retaddr(dcontext, ilist, instr,
X64_MODE_DC(dcontext) ? CS64_SELECTOR : CS32_SELECTOR,
opsize);
} else {
# endif
opnd_t stackop;
/* we go ahead and push cs, but we won't pop into cs */
instr_t *push = INSTR_CREATE_push(dcontext, opnd_create_reg(SEG_CS));
/* 2nd dest is the stack operand size */
stackop = instr_get_dst(push, 1);
opnd_set_size(&stackop, opsize);
instr_set_dst(push, 1, stackop);
PRE(ilist, instr, push);
# ifdef X64
}
# endif
}
/* We spill to XCX(private dcontext) slot for private fragments,
* and to TLS MANGLE_XCX_SPILL_SLOT for shared fragments.
* (Except for DYNAMO_OPTION(private_ib_in_tls), for which all use tls,
* but that has a performance hit because of the extra data cache line)
* We can get away with the split by having the shared ibl routine copy
* xcx to the private dcontext, and by having the private ibl never
* target shared fragments.
* We also have to modify the xcx spill from tls to private dcontext when
* adding a shared basic block to a trace.
*
* FIXME: if we do make non-trace-head basic blocks valid indirect branch
* targets, we should have the private ibl have special code to test the
* flags and copy xcx to the tls slot if necessary.
*/
# define SAVE_TO_DC_OR_TLS(dc, flags, reg, tls_offs, dc_offs) \
((DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, (flags))) \
? instr_create_save_to_tls(dc, reg, tls_offs) \
: instr_create_save_to_dcontext((dc), (reg), (dc_offs)))
# define SAVE_TO_DC_OR_TLS_OR_REG(dc, flags, reg, tls_offs, dc_offs, dest_reg) \
((X64_CACHE_MODE_DC(dc) && \
!X64_MODE_DC(dc) IF_X64(&&DYNAMO_OPTION(x86_to_x64_ibl_opt))) \
? INSTR_CREATE_mov_ld(dc, opnd_create_reg(dest_reg), opnd_create_reg(reg)) \
: SAVE_TO_DC_OR_TLS(dc, flags, reg, tls_offs, dc_offs))
# define RESTORE_FROM_DC_OR_TLS(dc, flags, reg, tls_offs, dc_offs) \
((DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, (flags))) \
? instr_create_restore_from_tls(dc, reg, tls_offs) \
: instr_create_restore_from_dcontext((dc), (reg), (dc_offs)))
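/* Note that SAVE_TO_DC_OR_TLS_OR_REG degenerates to a plain reg-to-reg mov into
 * dest_reg (r9/r10 in the uses below) for 32-bit code in a 64-bit cache under
 * -x86_to_x64_ibl_opt, where the upper registers are dead; otherwise it spills
 * to TLS or the dcontext as usual.
 */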
static void
mangle_far_direct_helper(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
/* FIXME i#823: we do not support other than flat 0-based CS, DS, SS, and ES.
* If the app wants to change segments in a WOW64 process, we will
* do the right thing for standard cs selector values (xref i#49).
* For other cs changes or in other modes, we do go through far_ibl
* today although we do not enact the cs change (nor bother to pass
* the selector in xbx).
*
* For WOW64, I tried keeping this a direct jmp for nice linking by doing the
* mode change in-fragment and then using a 64-bit stub with a 32-bit fragment,
* but that gets messy b/c a lot of code assumes it can create or calculate the
* size of exit stubs given nothing but the fragment flags. I tried adding
* FRAG_ENDS_IN_FAR_DIRECT but still need to pass another param to all the stub
* macros and routines for mid-trace exits and for prefixes for -disable_traces.
* So, going for treating as indirect and using the far_ibl. It's a trace
* barrier anyway, and rare. We treat it as indirect in all modes (including
* x86 builds) for simplicity (and eventually for full i#823 we'll want
* to issue cs changes there too).
*/
app_pc pc = opnd_get_pc(instr_get_target(instr));
# ifdef X64
if (!X64_MODE_DC(dcontext) &&
opnd_get_segment_selector(instr_get_target(instr)) == CS64_SELECTOR) {
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XBX, MANGLE_FAR_SPILL_SLOT,
XBX_OFFSET, REG_R10));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EBX),
OPND_CREATE_INT32(CS64_SELECTOR)));
}
# endif
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX, MANGLE_XCX_SPILL_SLOT,
XCX_OFFSET, REG_R9));
ASSERT((ptr_uint_t)pc < UINT_MAX); /* 32-bit code! */
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_ECX),
OPND_CREATE_INT32((ptr_uint_t)pc)));
}
/***************************************************************************
* DIRECT CALL
* Returns new next_instr
*/
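/* Sketch of the net effect for a near direct call (the direct exit cti to the
 * callee target itself is added by interp, not here):
 *     call target   =>   push <retaddr>
 */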
instr_t *
mangle_direct_call(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, bool mangle_calls, uint flags)
{
ptr_uint_t retaddr;
app_pc target = NULL;
opnd_t pushop = instr_get_dst(instr, 1);
opnd_size_t pushsz = stack_entry_size(instr, opnd_get_size(pushop));
if (opnd_is_near_pc(instr_get_target(instr)))
target = opnd_get_pc(instr_get_target(instr));
else if (opnd_is_instr(instr_get_target(instr))) {
instr_t *tgt = opnd_get_instr(instr_get_target(instr));
/* assumption: target's raw bits are meaningful */
target = instr_get_raw_bits(tgt);
ASSERT(target != 0);
/* FIXME case 6962: for far instr, we ignore the segment and
* assume it matches current cs */
} else if (opnd_is_far_pc(instr_get_target(instr))) {
target = opnd_get_pc(instr_get_target(instr));
/* FIXME case 6962: we ignore the segment and assume it matches current cs */
} else
ASSERT_NOT_REACHED();
if (!mangle_calls) {
/* off-trace call that will be executed natively */
/* relative target must be re-encoded */
instr_set_raw_bits_valid(instr, false);
# ifdef STEAL_REGISTER
/* FIXME: need to push edi prior to call and pop after.
* However, need to push edi prior to any args to this call,
* and it may be hard to find pre-arg-pushing spot...
* edi is supposed to be callee-saved, we're trusting this
* off-trace call to return, we may as well trust it to
* not trash edi -- these no-inline calls are dynamo's
* own routines, after all.
*/
# endif
return next_instr;
}
retaddr = get_call_return_address(dcontext, ilist, instr);
# ifdef CHECK_RETURNS_SSE2
/* ASSUMPTION: a call to the next instr is not going to ever have a
* matching ret! */
if (target == (app_pc)retaddr) {
LOG(THREAD, LOG_INTERP, 3, "found call to next instruction " PFX "\n", target);
} else {
check_return_handle_call(dcontext, ilist, next_instr);
}
/* now do the normal thing for a call */
# endif
if (instr_get_opcode(instr) == OP_call_far) {
/* N.B.: we do not support other than flat 0-based CS, DS, SS, and ES.
* if the app wants to change segments, we won't actually issue
* a segment change, and so will only work properly if the new segment
* is also 0-based. To properly issue new segments, we'd need a special
* ibl that ends in a far cti, and all prior address manipulations would
* need to be relative to the new segment, w/o messing up current segment.
* FIXME: can we do better without too much work?
* XXX: yes, for wow64: i#823: TODO mangle this like a far direct jmp
*/
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far direct call");
STATS_INC(num_far_dir_calls);
mangle_far_direct_helper(dcontext, ilist, instr, next_instr, flags);
insert_push_cs(dcontext, ilist, instr, 0, pushsz);
}
/* convert a direct call to a push of the return address */
insert_push_retaddr(dcontext, ilist, instr, retaddr, pushsz);
/* remove the call */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return next_instr;
}
# ifdef UNIX
/***************************************************************************
 * Mangle a memory reference operand that uses the fs/gs segment:
 * get the segment base of fs/gs into reg, and
 * replace oldop with a newop that uses reg instead of fs/gs.
 * The reg must not be used in oldop; otherwise the reg value
 * is corrupted.
*/
opnd_t
mangle_seg_ref_opnd(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
opnd_t oldop, reg_id_t reg)
{
opnd_t newop;
reg_id_t seg;
ASSERT(opnd_is_far_base_disp(oldop));
seg = opnd_get_segment(oldop);
/* we only mangle fs/gs */
if (seg != SEG_GS && seg != SEG_FS)
return oldop;
# ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return oldop;
# endif
/* The reg should not be used by the oldop */
ASSERT(!opnd_uses_reg(oldop, reg));
/* XXX: this mangling is pattern-matched in translation's instr_is_seg_ref_load() */
/* get app's segment base into reg. */
PRE(ilist, where,
instr_create_restore_from_tls(dcontext, reg, os_get_app_tls_base_offset(seg)));
if (opnd_get_index(oldop) != REG_NULL && opnd_get_base(oldop) != REG_NULL) {
/* if both base and index are used, use
* lea [base, reg, 1] => reg
* to get the base + seg_base into reg.
*/
PRE(ilist, where,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(reg),
opnd_create_base_disp(opnd_get_base(oldop), reg, 1, 0, OPSZ_lea)));
}
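    /* reg now holds the segment base, plus the original base if the lea above
     * ran; the operand rebuilt below uses reg as the base (or as the index when
     * there was no original index) so the final address matches the far ref.
     */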
if (opnd_get_index(oldop) != REG_NULL) {
newop = opnd_create_base_disp(reg, opnd_get_index(oldop), opnd_get_scale(oldop),
opnd_get_disp(oldop), opnd_get_size(oldop));
} else {
newop = opnd_create_base_disp(opnd_get_base(oldop), reg, 1, opnd_get_disp(oldop),
opnd_get_size(oldop));
}
return newop;
}
# endif /* UNIX */
/***************************************************************************
* INDIRECT CALL
*/
static reg_id_t
mangle_far_indirect_helper(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags, opnd_t *target_out)
{
opnd_t target = *target_out;
opnd_size_t addr_size;
reg_id_t reg_target = REG_NULL;
ASSERT(instr_get_opcode(instr) == OP_jmp_far_ind ||
instr_get_opcode(instr) == OP_call_far_ind);
/* FIXME i#823: we do not support other than flat 0-based CS, DS, SS, and ES.
* If the app wants to change segments in a WOW64 process, we will
* do the right thing for standard cs selector values (xref i#49).
* For other cs changes or in other modes, we do go through far_ibl
* today although we do not enact the cs change (nor bother to pass
* the selector in xbx).
*/
/* opnd type is i_Ep, it's not a far base disp b/c segment is at
* memory location, not specified as segment prefix on instr
* we assume register operands are marked as invalid instrs long
* before this point.
*/
ASSERT(opnd_is_base_disp(target));
/* Segment selector is the final 2 bytes.
* For non-mixed-mode, we ignore it.
* We assume DS base == target cti CS base.
*/
/* if data16 then just 2 bytes for address
* if x64 mode and Intel and rex then 8 bytes for address */
ASSERT((X64_MODE_DC(dcontext) && opnd_get_size(target) == OPSZ_10 &&
proc_get_vendor() != VENDOR_AMD) ||
opnd_get_size(target) == OPSZ_6 || opnd_get_size(target) == OPSZ_4);
if (opnd_get_size(target) == OPSZ_10) {
addr_size = OPSZ_8;
reg_target = REG_RCX;
} else if (opnd_get_size(target) == OPSZ_6) {
addr_size = OPSZ_4;
reg_target = REG_ECX;
} else /* target has OPSZ_4 */ {
addr_size = OPSZ_2;
reg_target = REG_XCX; /* caller uses movzx so size doesn't have to match */
}
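    /* reg_target is the (possibly sub-sized) xcx the caller will load the
     * offset portion of the far pointer into; the trailing 2-byte selector is
     * only captured (into ebx) in the mixed-mode case below.
     */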
# ifdef X64
if (mixed_mode_enabled()) {
/* While we don't support arbitrary segments, we do support
* mode changes using standard cs selector values (i#823).
* We save the selector into xbx.
*/
opnd_t sel = target;
opnd_set_disp(&sel, opnd_get_disp(target) + opnd_size_in_bytes(addr_size));
opnd_set_size(&sel, OPSZ_2);
/* all scratch space should be in TLS only */
ASSERT(TEST(FRAG_SHARED, flags) || DYNAMO_OPTION(private_ib_in_tls));
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XBX, MANGLE_FAR_SPILL_SLOT,
XBX_OFFSET, REG_R10));
PRE(ilist, instr, INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_EBX), sel));
if (instr_uses_reg(instr, REG_XBX)) {
/* instr can't be both riprel (uses xax slot for mangling) and use
* a register, so we spill to the riprel (== xax) slot
*/
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, flags, REG_XBX, MANGLE_RIPREL_SPILL_SLOT,
XAX_OFFSET));
POST(ilist, instr,
instr_create_restore_from_tls(dcontext, REG_XBX,
MANGLE_RIPREL_SPILL_SLOT));
}
}
# endif
opnd_set_size(target_out, addr_size);
return reg_target;
}
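/* Sketch of the net effect for a near indirect call, "call *Ev" (the
 * indirect-branch-lookup exit is added by interp, not here):
 *     <spill xcx to TLS/dcontext>
 *     mov  Ev -> xcx
 *     push <retaddr>
 */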
instr_t *
mangle_indirect_call(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, bool mangle_calls, uint flags)
{
opnd_t target;
ptr_uint_t retaddr;
opnd_t pushop = instr_get_dst(instr, 1);
opnd_size_t pushsz = stack_entry_size(instr, opnd_get_size(pushop));
reg_id_t reg_target = REG_XCX;
if (!mangle_calls)
return next_instr;
retaddr = get_call_return_address(dcontext, ilist, instr);
/* Convert near, indirect calls. The jump to the exit_stub that
* jumps to indirect_branch_lookup was already inserted into the
* instr list by interp EXCEPT for the case in which we're converting
* an indirect call to a direct call. In that case, mangle later
* inserts a direct exit stub.
*/
/* If this call is marked for conversion, do minimal processing.
* FIXME Just a note that converted calls are not subjected to any of
* the specialized builds' processing further down.
*/
if (TEST(INSTR_IND_CALL_DIRECT, instr->flags)) {
/* convert the call to a push of the return address */
insert_push_retaddr(dcontext, ilist, instr, retaddr, pushsz);
/* remove the call */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return next_instr;
}
/* put the push AFTER the instruction that calculates
* the target, b/c if target depends on xsp we must use
* the value of xsp prior to this call instruction!
* we insert before next_instr to accomplish this.
*/
if (instr_get_opcode(instr) == OP_call_far_ind) {
/* goes right before the push of the ret addr */
insert_push_cs(dcontext, ilist, next_instr, 0, pushsz);
/* see notes below -- we don't really support switching segments,
* though we do go ahead and push cs, we won't pop into cs
*/
}
insert_push_retaddr(dcontext, ilist, next_instr, retaddr, pushsz);
/* save away xcx so that we can use it */
    /* (it's restored in x86.s (indirect_branch_lookup)) */
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX, MANGLE_XCX_SPILL_SLOT,
XCX_OFFSET, REG_R9));
# ifdef STEAL_REGISTER
/* Steal edi if call uses it, using original call instruction */
steal_reg(dcontext, instr, ilist);
if (ilist->flags)
restore_state(dcontext, next_instr, ilist);
/* It's impossible for our register stealing to use ecx
* because no call can simultaneously use 3 registers, right?
* Maximum is 2, in something like "call *(edi,ecx,4)"?
* If it is possible, need to make sure stealing's use of ecx
* doesn't conflict w/ our use
*/
# endif
/* change: call /2, Ev -> movl Ev, %xcx */
target = instr_get_src(instr, 0);
if (instr_get_opcode(instr) == OP_call_far_ind) {
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far indirect call");
STATS_INC(num_far_ind_calls);
reg_target = mangle_far_indirect_helper(dcontext, ilist, instr, next_instr, flags,
&target);
}
# ifdef UNIX
/* i#107, mangle the memory reference opnd that uses segment register. */
if (INTERNAL_OPTION(mangle_app_seg) && opnd_is_far_base_disp(target)) {
/* FIXME: we use REG_XCX to store the segment base, which might be used
* in target and cause assertion failure in mangle_seg_ref_opnd.
*/
ASSERT_BUG_NUM(107, !opnd_uses_reg(target, REG_XCX));
target = mangle_seg_ref_opnd(dcontext, ilist, instr, target, REG_XCX);
}
# endif
/* cannot call instr_reset, it will kill prev & next ptrs */
instr_free(dcontext, instr);
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_opcode(instr, opnd_get_size(target) == OPSZ_2 ? OP_movzx : OP_mov_ld);
instr_set_dst(instr, 0, opnd_create_reg(reg_target));
instr_set_src(instr, 0, target); /* src stays the same */
if (instrlist_get_translation_target(ilist) != NULL) {
/* make sure original raw bits are used for translation */
instr_set_translation(instr, instr_get_raw_bits(instr));
}
instr_set_our_mangling(instr, true);
# ifdef CHECK_RETURNS_SSE2
check_return_handle_call(dcontext, ilist, next_instr);
# endif
return next_instr;
}
/***************************************************************************
* RETURN
*/
# ifdef X64
/* Saves the selector from the top of the stack into xbx, after spilling xbx,
* for far_ibl.
*/
static void
mangle_far_return_save_selector(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
uint flags)
{
if (mixed_mode_enabled()) {
/* While we don't support arbitrary segments, we do support
* mode changes using standard cs selector values (i#823).
* We save the selector into xbx.
*/
/* We could do a pop but state xl8 is already set up to restore lea */
/* all scratch space should be in TLS only */
ASSERT(TEST(FRAG_SHARED, flags) || DYNAMO_OPTION(private_ib_in_tls));
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XBX, MANGLE_FAR_SPILL_SLOT,
XBX_OFFSET, REG_R10));
PRE(ilist, instr,
INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_EBX),
OPND_CREATE_MEM16(REG_XSP, 0)));
}
}
# endif
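/* Sketch of the common case for "ret $N" (the ret ibl exit is added by interp):
 *     <spill xcx to TLS/dcontext>
 *     pop  -> xcx
 *     lea  xsp, [xsp + N]    ; only when the ret carries an immediate
 * iret, far rets, and 4-byte rets in a 64-bit cache take the longer paths
 * handled below.
 */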
void
mangle_return(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
instr_t *pop;
opnd_t retaddr;
opnd_size_t retsz;
# ifdef CHECK_RETURNS_SSE2
check_return_handle_return(dcontext, ilist, next_instr);
/* now do the normal ret mangling */
# endif
/* Convert returns. If aggressive we could take advantage of the
* fact that xcx is dead at the return and not bother saving it?
* The jump to the exit_stub that jumps to indirect_branch_lookup
* was already inserted into the instr list by interp. */
/* save away xcx so that we can use it */
    /* (it's restored in x86.s (indirect_branch_lookup)) */
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX, MANGLE_XCX_SPILL_SLOT,
XCX_OFFSET, REG_R9));
/* see if ret has an immed int operand, assumed to be 1st src */
if (instr_num_srcs(instr) > 0 && opnd_is_immed_int(instr_get_src(instr, 0))) {
/* if has an operand, return removes some stack space,
* AFTER the return address is popped
*/
int val = (int)opnd_get_immed_int(instr_get_src(instr, 0));
IF_X64(ASSERT_TRUNCATE(val, int, opnd_get_immed_int(instr_get_src(instr, 0))));
/* addl sizeof_param_area, %xsp
* except that clobbers the flags, so we use lea */
PRE(ilist, next_instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, val, OPSZ_lea)));
}
/* don't need to steal edi since return cannot use registers */
/* the retaddr operand is always the final source for all OP_ret* instrs */
retaddr = instr_get_src(instr, instr_num_srcs(instr) - 1);
retsz = stack_entry_size(instr, opnd_get_size(retaddr));
if (X64_CACHE_MODE_DC(dcontext) && retsz == OPSZ_4) {
if (instr_get_opcode(instr) == OP_iret || instr_get_opcode(instr) == OP_ret_far) {
/* N.B.: For some unfathomable reason iret and ret_far default to operand
* size 4 in 64-bit mode (making them, along w/ call_far, the only stack
* operation instructions to do so). So if we see an iret or far ret with
* OPSZ_4 in 64-bit mode we need a 4-byte pop, but since we can't actually
* generate a 4-byte pop we have to emulate it here. */
SYSLOG_INTERNAL_WARNING_ONCE("Encountered iretd/lretd in 64-bit mode!");
}
/* Note moving into ecx automatically zero extends which is what we want. */
PRE(ilist, instr,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_ECX),
OPND_CREATE_MEM32(REG_RSP, 0)));
/* iret could use add since going to pop the eflags, but not lret.
* lret could combine w/ segment lea below: but not perf-crit instr, and
* anticipating cs preservation PR 271317 I'm leaving separate. */
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, 4, OPSZ_lea)));
} else {
/* change RET into a POP, keeping the operand size */
opnd_t memop = retaddr;
pop = INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_XCX));
/* need per-entry size, not total size (double for far ret) */
opnd_set_size(&memop, retsz);
instr_set_src(pop, 1, memop);
if (retsz == OPSZ_2)
instr_set_dst(pop, 0, opnd_create_reg(REG_CX));
/* We can't do a 4-byte pop in 64-bit mode, but excepting iretd and lretd
* handled above we should never see one. */
ASSERT(!X64_MODE_DC(dcontext) || retsz != OPSZ_4);
PRE(ilist, instr, pop);
if (retsz == OPSZ_2) {
/* we need to zero out the top 2 bytes */
PRE(ilist, instr,
INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_ECX),
opnd_create_reg(REG_CX)));
}
}
# ifdef CLIENT_INTERFACE
if (TEST(INSTR_CLOBBER_RETADDR, instr->flags)) {
/* we put the value in the note field earlier */
ptr_uint_t val = (ptr_uint_t)instr->note;
insert_mov_ptr_uint_beyond_TOS(dcontext, ilist, instr, val, retsz);
}
# endif
if (instr_get_opcode(instr) == OP_ret_far) {
/* FIXME i#823: we do not support other than flat 0-based CS, DS, SS, and ES.
* If the app wants to change segments in a WOW64 process, we will
* do the right thing for standard cs selector values (xref i#49).
* For other cs changes or in other modes, we do go through far_ibl
* today although we do not enact the cs change (nor bother to pass
* the selector in xbx).
*/
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far ret");
STATS_INC(num_far_rets);
# ifdef X64
mangle_far_return_save_selector(dcontext, ilist, instr, flags);
# endif
/* pop selector from stack, but not into cs, just junk it
* (the 16-bit selector is expanded to 32 bits on the push, unless data16)
*/
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0,
opnd_size_in_bytes(retsz), OPSZ_lea)));
}
if (instr_get_opcode(instr) == OP_iret) {
instr_t *popf;
/* Xref PR 215553 and PR 191977 - we actually see this on 64-bit Vista */
LOG(THREAD, LOG_INTERP, 2, "Encountered iret at " PFX " - mangling\n",
instr_get_translation(instr));
STATS_INC(num_irets);
/* In 32-bit mode this is a pop->EIP pop->CS pop->eflags.
* 64-bit mode (with either 32-bit or 64-bit operand size,
* despite the (wrong) Intel manual pseudocode: see i#833 and
* the win32.mixedmode test) extends
* the above and additionally adds pop->RSP pop->ss. N.B.: like OP_far_ret we
* ignore the CS (except mixed-mode WOW64) and SS segment changes
* (see the comments there).
*/
# ifdef X64
mangle_far_return_save_selector(dcontext, ilist, instr, flags);
# endif
/* Return address is already popped, next up is CS segment which we ignore
* (unless in mixed-mode, handled above) so
* adjust stack pointer. Note we can use an add here since the eflags will
* be written below. */
PRE(ilist, instr,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_INT8(opnd_size_in_bytes(retsz))));
/* Next up is xflags, we use a popf. Popf should be setting the right flags
* (it's difficult to tell because in the docs iret lists the flags it does
* set while popf lists the flags it doesn't set). The docs aren't entirely
* clear, but any flag that we or a user mode program would care about should
* be right. */
popf = INSTR_CREATE_popf(dcontext);
if (X64_CACHE_MODE_DC(dcontext) && retsz == OPSZ_4) {
/* We can't actually create a 32-bit popf and there's no easy way to
* simulate one. For now we'll do a 64-bit popf and fixup the stack offset.
* If AMD/INTEL ever start using the top half of the rflags register then
* we could have problems here. We could also break stack transparency and
* do a mov, push, popf to zero extend the value. */
PRE(ilist, instr, popf);
/* flags are already set, must use lea to fix stack */
PRE(ilist, instr,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -4, OPSZ_lea)));
} else {
/* get popf size right the same way we do it for the return address */
opnd_t memop = retaddr;
opnd_set_size(&memop, retsz);
DOCHECK(1, {
if (retsz == OPSZ_2)
ASSERT_NOT_TESTED();
});
instr_set_src(popf, 1, memop);
PRE(ilist, instr, popf);
}
/* Mangles single step exception after a popf. */
mangle_possible_single_step(dcontext, ilist, popf);
# ifdef X64
/* In x64 mode, iret additionally does pop->RSP and pop->ss. */
if (X64_MODE_DC(dcontext)) {
if (retsz == OPSZ_8)
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RSP)));
else if (retsz == OPSZ_4) {
PRE(ilist, instr,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_ESP),
OPND_CREATE_MEM32(REG_RSP, 0)));
} else {
ASSERT_NOT_TESTED();
PRE(ilist, instr,
INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_ESP),
OPND_CREATE_MEM16(REG_RSP, 0)));
}
/* We're ignoring the set of SS and since we just set RSP we don't need
* to do anything to adjust the stack for the pop (since the pop would have
* occurred with the old RSP). */
}
# endif
}
/* remove the ret */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
}
/***************************************************************************
* INDIRECT JUMP
*/
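/* Same spill-and-load scheme as mangle_indirect_call, minus the return-address
 * push: roughly <spill xcx>; mov i_Ev -> xcx, followed by the ibl exit that
 * interp already added.
 */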
instr_t *
mangle_indirect_jump(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
opnd_t target;
reg_id_t reg_target = REG_XCX;
/* Convert indirect branches (that are not returns). Again, the
* jump to the exit_stub that jumps to indirect_branch_lookup
* was already inserted into the instr list by interp. */
/* save away xcx so that we can use it */
    /* (it's restored in x86.s (indirect_branch_lookup)) */
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX, MANGLE_XCX_SPILL_SLOT,
XCX_OFFSET, REG_R9));
# ifdef STEAL_REGISTER
/* Steal edi if branch uses it, using original instruction */
steal_reg(dcontext, instr, ilist);
if (ilist->flags)
restore_state(dcontext, next_instr, ilist);
# endif
/* change: jmp /4, i_Ev -> movl i_Ev, %xcx */
target = instr_get_target(instr);
if (instr_get_opcode(instr) == OP_jmp_far_ind) {
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far indirect jump");
STATS_INC(num_far_ind_jmps);
reg_target = mangle_far_indirect_helper(dcontext, ilist, instr, next_instr, flags,
&target);
}
# ifdef UNIX
/* i#107, mangle the memory reference opnd that uses segment register. */
if (INTERNAL_OPTION(mangle_app_seg) && opnd_is_far_base_disp(target)) {
/* FIXME: we use REG_XCX to store segment base, which might be used
* in target and cause assertion failure in mangle_seg_ref_opnd.
*/
ASSERT_BUG_NUM(107, !opnd_uses_reg(target, REG_XCX));
target = mangle_seg_ref_opnd(dcontext, ilist, instr, target, REG_XCX);
}
# endif
/* cannot call instr_reset, it will kill prev & next ptrs */
instr_free(dcontext, instr);
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_opcode(instr, opnd_get_size(target) == OPSZ_2 ? OP_movzx : OP_mov_ld);
instr_set_dst(instr, 0, opnd_create_reg(reg_target));
instr_set_src(instr, 0, target); /* src stays the same */
if (instrlist_get_translation_target(ilist) != NULL) {
/* make sure original raw bits are used for translation */
instr_set_translation(instr, instr_get_raw_bits(instr));
}
instr_set_our_mangling(instr, true);
/* It's impossible for our register stealing to use ecx
* because no branch can simultaneously use 3 registers, right?
* Maximum is 2, in something like "jmp *(edi,ebx,4)"?
* If it is possible, need to make sure stealing's use of ecx
     * doesn't conflict w/ our use -- FIXME
*/
return next_instr;
}
/***************************************************************************
* FAR DIRECT JUMP
*/
void
mangle_far_direct_jump(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far direct jmp");
STATS_INC(num_far_dir_jmps);
mangle_far_direct_helper(dcontext, ilist, instr, next_instr, flags);
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
}
/***************************************************************************
* SYSCALL
*/
# ifdef UNIX
/* Inserts code to handle clone into ilist.
* instr is the syscall instr itself.
* Assumes that instructions exist beyond instr in ilist.
*
* CAUTION: don't use a lot of stack in the generated code because
* get_clone_record() makes assumptions about the usage of stack being
* less than a page.
*/
void
mangle_insert_clone_code(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr _IF_X64(gencode_mode_t mode))
{
/* int 0x80
* xchg xax,xcx
* jecxz child
* jmp parent
* child:
* xchg xax,xcx
* # i#149/PR 403015: the child is on the dstack so no need to swap stacks
* jmp new_thread_dynamo_start
* parent:
* xchg xax,xcx
* <post system call, etc.>
*/
instr_t *in = instr_get_next(instr);
instr_t *child = INSTR_CREATE_label(dcontext);
instr_t *parent = INSTR_CREATE_label(dcontext);
ASSERT(in != NULL);
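    /* Clone/fork returns 0 in xax for the child, so after the xchg the child has
     * xcx == 0 and jecxz branches to the child path without perturbing eflags.
     */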
PRE(ilist, in,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_XCX)));
PRE(ilist, in, INSTR_CREATE_jecxz(dcontext, opnd_create_instr(child)));
PRE(ilist, in, INSTR_CREATE_jmp(dcontext, opnd_create_instr(parent)));
PRE(ilist, in, child);
PRE(ilist, in,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_XCX)));
/* We used to insert this directly into fragments for inlined system
* calls, but not once we eliminated clean calls out of the DR cache
* for security purposes. Thus it can be a meta jmp, or an indirect jmp.
*/
insert_reachable_cti(dcontext, ilist, in, vmcode_get_start(),
(byte *)get_new_thread_start(dcontext _IF_X64(mode)),
true /*jmp*/, false /*!returns*/, false /*!precise*/,
DR_REG_NULL /*no scratch*/, NULL);
instr_set_meta(instr_get_prev(in));
PRE(ilist, in, parent);
PRE(ilist, in,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_XCX)));
}
# endif /* UNIX */
# ifdef WINDOWS
/* Note that ignore syscalls processing for XP and 2003 is a two-phase operation.
* For this reason, mangle_syscall() might be called with a 'next_instr' that's
* not an original app instruction but one inserted by the earlier mangling phase.
*/
# endif
/* XXX: any extra code here can interfere with mangle_syscall_code()
* and interrupted_inlined_syscall() which have assumptions about the
* exact code around inlined system calls.
*/
void
mangle_syscall_arch(dcontext_t *dcontext, instrlist_t *ilist, uint flags, instr_t *instr,
instr_t *next_instr)
{
# ifdef UNIX
/* Shared routine already checked method, handled INSTR_NI_SYSCALL*,
* and inserted the signal barrier and non-auto-restart nop.
* If we get here, we're dealing with an ignorable syscall.
*/
# ifdef MACOS
if (instr_get_opcode(instr) == OP_sysenter) {
/* The kernel returns control to whatever user-mode places in edx.
* We get control back here and then go to the ret ibl (since normally
* there's a call to a shared routine that does "pop edx").
*/
instr_t *post_sysenter = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, flags, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
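        /* Load the code-cache address of post_sysenter into xdx so that the kernel
         * returns into the code cache rather than to the app's continuation.
         */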
instrlist_insert_mov_instr_addr(dcontext, post_sysenter, NULL /*in cache*/,
opnd_create_reg(REG_XDX), ilist, instr, NULL,
NULL);
/* sysenter goes here */
PRE(ilist, next_instr, post_sysenter);
PRE(ilist, next_instr,
RESTORE_FROM_DC_OR_TLS(dcontext, flags, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
PRE(ilist, next_instr,
SAVE_TO_DC_OR_TLS(dcontext, flags, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
PRE(ilist, next_instr,
INSTR_CREATE_mov_st(dcontext, opnd_create_reg(REG_XCX),
opnd_create_reg(REG_XDX)));
} else if (TEST(INSTR_BRANCH_SPECIAL_EXIT, instr->flags)) {
int num = instr_get_interrupt_number(instr);
ASSERT(instr_get_opcode(instr) == OP_int);
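        /* On MacOS, int 0x81 is the Mach trap gate and int 0x82 the machine-dependent
         * call gate; record which one caused this non-ignorable syscall exit.
         */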
if (num == 0x81 || num == 0x82) {
int reason = (num == 0x81) ? EXIT_REASON_NI_SYSCALL_INT_0x81
: EXIT_REASON_NI_SYSCALL_INT_0x82;
if (DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, flags)) {
insert_shared_get_dcontext(dcontext, ilist, instr, true /*save_xdi*/);
PRE(ilist, instr,
INSTR_CREATE_mov_st(
dcontext,
opnd_create_dcontext_field_via_reg_sz(
dcontext, REG_NULL /*default*/, EXIT_REASON_OFFSET, OPSZ_2),
OPND_CREATE_INT16(reason)));
insert_shared_restore_dcontext_reg(dcontext, ilist, instr);
} else {
PRE(ilist, instr,
instr_create_save_immed16_to_dcontext(dcontext, reason,
EXIT_REASON_OFFSET));
}
}
}
# endif
# ifdef STEAL_REGISTER
/* in linux, system calls get their parameters via registers.
* edi is the last one used, but there are system calls that
* use it, so we put the real value into edi. plus things
* like fork() should get the real register values.
* it's also a good idea to put the real edi into %edi for
* debugger interrupts (int3).
*/
/* the only way we can save and then restore our dc
* ptr is to use the stack!
* this should be fine, all interrupt instructions push
* both eflags and return address on stack, so esp must
* be valid at this point. there could be an application
* assuming only 2 slots on stack will be used, we use a 3rd
* slot, could mess up that app...but what can we do?
* also, if kernel examines user stack, we could have problems.
* push edi # push dcontext ptr
* restore edi # restore app edi
* <syscall>
* push ebx
* mov edi, ebx
* mov 4(esp), edi # get dcontext ptr
* save ebx to edi slot
* pop ebx
* add 4,esp # clean up push of dcontext ptr
*/
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_EDI)));
PRE(ilist, instr, instr_create_restore_from_dcontext(dcontext, REG_EDI, XDI_OFFSET));
/* insert after in reverse order: */
POST(ilist, instr,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_ESP), OPND_CREATE_INT8(4)));
POST(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_EBX)));
POST(ilist, instr, instr_create_save_to_dcontext(dcontext, REG_EBX, XDI_OFFSET));
POST(ilist, instr,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_EDI),
OPND_CREATE_MEM32(REG_ESP, 4)));
POST(ilist, instr,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_EBX),
opnd_create_reg(REG_EDI)));
POST(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_EBX)));
# endif /* STEAL_REGISTER */
# else /* WINDOWS */
/* special handling of system calls is performed in shared_syscall or
* in do_syscall
*/
/* FIXME: for ignorable syscalls,
* do we need support for exiting mid-fragment prior to a syscall
* like we do on Linux, to bound time in cache?
*/
if (does_syscall_ret_to_callsite()) {
uint len = instr_length(dcontext, instr);
if (TEST(INSTR_SHARED_SYSCALL, instr->flags)) {
ASSERT(DYNAMO_OPTION(shared_syscalls));
/* this syscall will be performed by the shared_syscall code
* we just need to place a return address into the dcontext
* xsi slot or the mangle-next-tag tls slot
*/
if (DYNAMO_OPTION(shared_fragment_shared_syscalls)) {
# ifdef X64
ASSERT(instr_raw_bits_valid(instr));
/* PR 244741: no 64-bit store-immed-to-mem
* FIXME: would be nice to move this to the stub and
* use the dead rbx register!
*/
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, REG_XCX, MANGLE_NEXT_TAG_SLOT));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR((instr->bytes + len))));
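                /* The xchg below both places the return point into the TLS slot and
                 * restores the app xcx value that was saved there above.
                 */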
PRE(ilist, instr,
INSTR_CREATE_xchg(
dcontext,
opnd_create_tls_slot(os_tls_offset(MANGLE_NEXT_TAG_SLOT)),
opnd_create_reg(REG_XCX)));
# else
PRE(ilist, instr,
INSTR_CREATE_mov_st(
dcontext,
opnd_create_tls_slot(os_tls_offset(MANGLE_NEXT_TAG_SLOT)),
OPND_CREATE_INTPTR((instr->bytes + len))));
# endif
} else {
PRE(ilist, instr,
instr_create_save_immed32_to_dcontext(
dcontext, (uint)(ptr_uint_t)(instr->bytes + len), XSI_OFFSET));
}
}
/* Handle ignorable syscall. non-ignorable system calls are
* destroyed and removed from the list at the end of this func.
*/
else if (!TEST(INSTR_NI_SYSCALL, instr->flags)) {
if (get_syscall_method() == SYSCALL_METHOD_INT && DYNAMO_OPTION(sygate_int)) {
                /* For Sygate we need to mangle this into a call to int_syscall_addr.
                 * Is anyone going to get screwed up by this change
                 * (say a flags change?) [-ignore_syscalls only] */
ASSERT_NOT_TESTED();
instrlist_replace(ilist, instr, create_syscall_instr(dcontext));
instr_destroy(dcontext, instr);
} else if (get_syscall_method() == SYSCALL_METHOD_SYSCALL)
ASSERT_NOT_TESTED();
else if (get_syscall_method() == SYSCALL_METHOD_WOW64)
ASSERT_NOT_TESTED();
return;
}
} else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
/* on XP/2003 we have a choice between inserting a trampoline at the
* return pt of the sysenter, which is 0x7ffe0304 (except for
* SP2-patched XP), which is bad since it would clobber whatever's after
* the ret there (unless we used a 0xcc, like Visual Studio 2005 debugger
* does), or replacing the ret addr on the stack -- we choose the
* latter as the lesser of two transparency evils. Note that the
* page at 0x7ffe0000 can't be made writable anyway, so hooking
* isn't possible.
*/
if (TEST(INSTR_SHARED_SYSCALL, instr->flags)) {
ASSERT(DYNAMO_OPTION(shared_syscalls));
}
/* Handle ignorable syscall. non-ignorable system calls are
* destroyed and removed from the list at the end of this func.
*/
else if (!TEST(INSTR_NI_SYSCALL, instr->flags)) {
instr_t *mov_imm;
            /* even w/ an ignorable syscall, we need to make sure we regain control */
ASSERT(next_instr != NULL);
ASSERT(DYNAMO_OPTION(indcall2direct));
/* for sygate hack need to basically duplicate what is done in
* shared_syscall, but here we could be shared so would need to
* grab dcontext first etc. */
ASSERT_NOT_IMPLEMENTED(!DYNAMO_OPTION(sygate_sysenter));
/* PR 253943: we don't support sysenter in x64 */
IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* can't have 8-byte imm-to-mem */
/* FIXME PR 303413: we won't properly translate a fault in our
* app stack reference here. It's marked as our own mangling
* so we'll at least return failure from our translate routine.
*/
mov_imm = INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, 0),
opnd_create_instr(next_instr));
ASSERT(instr_is_mov_imm_to_tos(mov_imm));
PRE(ilist, instr, mov_imm);
/* do not let any encoding for length be cached!
* o/w will lose pc-relative opnd
*/
/* 'next_instr' is executed after the after-syscall vsyscall
* 'ret', which is executed natively. */
instr_set_meta(instr_get_prev(instr));
return; /* leave syscall instr alone */
}
} else {
SYSLOG_INTERNAL_ERROR("unsupported system call method");
LOG(THREAD, LOG_INTERP, 1, "don't know convention for this syscall method\n");
if (!TEST(INSTR_NI_SYSCALL, instr->flags))
return;
ASSERT_NOT_IMPLEMENTED(false);
}
/* destroy the syscall instruction */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
# endif /* WINDOWS */
}
/***************************************************************************
* NON-SYSCALL INTERRUPT
*/
void
mangle_interrupt(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
# ifdef WINDOWS
int num;
if (instr_get_opcode(instr) != OP_int)
return;
num = instr_get_interrupt_number(instr);
if (num == 0x2b) {
/* A callback finishes and returns to the interruption
* point of the thread with the instruction "int 2b".
* The interrupt ends the block; remove the instruction
* since we'll come back to dynamo to perform the
* interrupt.
*/
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
}
# endif /* WINDOWS */
}
/***************************************************************************
* Single step exceptions catching
*/
void
mangle_possible_single_step(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr)
{
    /* Simply inserts two nops so that the next instruction where a single step
     * exception might occur is in the same basic block and so that the
* translation of a single step exception points back to the instruction
* which set the trap flag.
* The single step exception is a problem because
* the ExceptionAddress should be the next EIP.
*/
POST(ilist, instr, INSTR_CREATE_nop(dcontext));
/* Inserting two nops to get ExceptionAddress on the second one. */
POST(ilist, instr, INSTR_CREATE_nop(dcontext));
}
/***************************************************************************
* Single step exceptions generation
*/
void
mangle_single_step(dcontext_t *dcontext, instrlist_t *ilist, uint flags, instr_t *instr)
{
/* Sets exit reason dynamically. */
if (DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, flags)) {
insert_shared_get_dcontext(dcontext, ilist, instr, true /*save_xdi*/);
PRE(ilist, instr,
INSTR_CREATE_mov_st(
dcontext,
opnd_create_dcontext_field_via_reg_sz(dcontext, REG_NULL /*default*/,
EXIT_REASON_OFFSET, OPSZ_2),
OPND_CREATE_INT16(EXIT_REASON_SINGLE_STEP)));
insert_shared_restore_dcontext_reg(dcontext, ilist, instr);
} else {
PRE(ilist, instr,
instr_create_save_immed16_to_dcontext(dcontext, EXIT_REASON_SINGLE_STEP,
EXIT_REASON_OFFSET));
}
}
/***************************************************************************
* FLOATING POINT PC
*/
/* The offset of the last floating point PC in the saved state */
# define FNSAVE_PC_OFFS 12
# define FXSAVE_PC_OFFS 8
# define FXSAVE_SIZE 512
void
float_pc_update(dcontext_t *dcontext)
{
byte *state = *(byte **)(((byte *)dcontext->local_state) + FLOAT_PC_STATE_SLOT);
app_pc orig_pc, xl8_pc;
uint offs = 0;
LOG(THREAD, LOG_INTERP, 2, "%s: fp state " PFX "\n", __FUNCTION__, state);
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE ||
dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE64) {
/* Check whether the FPU state was saved */
uint64 header_bv = *(uint64 *)(state + FXSAVE_SIZE);
        if (!TEST(XCR0_FP, header_bv)) {
            LOG(THREAD, LOG_INTERP, 2, "%s: xsave did not save FP state => nop\n",
                __FUNCTION__);
            return;
        }
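        /* Otherwise fall through: the xsave legacy area uses the fxsave layout, so
         * the FXSAVE_PC_OFFS and XSAVE64 handling below apply to it as well.
         */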
}
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_FNSAVE) {
offs = FNSAVE_PC_OFFS;
} else {
offs = FXSAVE_PC_OFFS;
}
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_FXSAVE64 ||
dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE64)
orig_pc = *(app_pc *)(state + offs);
else /* just bottom 32 bits of pc */
orig_pc = (app_pc)(ptr_uint_t) * (uint *)(state + offs);
if (orig_pc == NULL) {
/* no fp instr yet */
LOG(THREAD, LOG_INTERP, 2, "%s: pc is NULL\n", __FUNCTION__);
return;
}
/* i#1211-c#1: the orig_pc might be an app pc restored from fldenv */
if (!in_fcache(orig_pc) &&
/* XXX: i#698: there might be fp instr neither in fcache nor in app */
!(in_generated_routine(dcontext, orig_pc) || is_dynamo_address(orig_pc) ||
is_in_dynamo_dll(orig_pc) IF_CLIENT_INTERFACE(|| is_in_client_lib(orig_pc)))) {
bool no_xl8 = true;
# ifdef X64
if (dcontext->upcontext.upcontext.exit_reason != EXIT_REASON_FLOAT_PC_FXSAVE64 &&
dcontext->upcontext.upcontext.exit_reason != EXIT_REASON_FLOAT_PC_XSAVE64) {
/* i#1427: try to fill in the top 32 bits */
ptr_uint_t vmcode = (ptr_uint_t)vmcode_get_start();
if ((vmcode & 0xffffffff00000000) > 0) {
byte *orig_try =
(byte *)((vmcode & 0xffffffff00000000) | (ptr_uint_t)orig_pc);
if (in_fcache(orig_try)) {
LOG(THREAD, LOG_INTERP, 2,
"%s: speculating: pc " PFX " + top half of vmcode = " PFX "\n",
__FUNCTION__, orig_pc, orig_try);
orig_pc = orig_try;
no_xl8 = false;
}
}
}
# endif
if (no_xl8) {
LOG(THREAD, LOG_INTERP, 2, "%s: pc " PFX " is translated already\n",
__FUNCTION__, orig_pc);
return;
}
}
/* We must either grab thread_initexit_lock or be couldbelinking to translate */
mutex_lock(&thread_initexit_lock);
xl8_pc = recreate_app_pc(dcontext, orig_pc, NULL);
mutex_unlock(&thread_initexit_lock);
LOG(THREAD, LOG_INTERP, 2, "%s: translated " PFX " to " PFX "\n", __FUNCTION__,
orig_pc, xl8_pc);
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_FXSAVE64 ||
dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE64)
*(app_pc *)(state + offs) = xl8_pc;
else /* just bottom 32 bits of pc */
*(uint *)(state + offs) = (uint)(ptr_uint_t)xl8_pc;
}
void
mangle_float_pc(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint *flags INOUT)
{
/* If there is a prior non-control float instr, we can inline the pc update.
* Otherwise, we go back to dispatch. In the latter case we do not support
* building traces across the float pc save: we assume it's rare.
*/
app_pc prior_float = NULL;
bool exit_is_normal = false;
int op = instr_get_opcode(instr);
opnd_t memop = instr_get_dst(instr, 0);
ASSERT(opnd_is_memory_reference(memop));
/* To simplify the code here we don't support rip-rel for local handling.
* We also don't support xsave, as it optionally writes the fpstate.
*/
if (opnd_is_base_disp(memop) && op != OP_xsave32 && op != OP_xsaveopt32 &&
op != OP_xsave64 && op != OP_xsaveopt64 && op != OP_xsavec32 &&
op != OP_xsavec64) {
instr_t *prev;
for (prev = instr_get_prev_expanded(dcontext, ilist, instr); prev != NULL;
prev = instr_get_prev_expanded(dcontext, ilist, prev)) {
dr_fp_type_t type;
if (instr_is_app(prev) && instr_is_floating_ex(prev, &type)) {
bool control_instr = false;
if (type == DR_FP_STATE /* quick check */ &&
/* Check the list from Intel Vol 1 8.1.8 */
(op == OP_fnclex || op == OP_fldcw || op == OP_fnstcw ||
op == OP_fnstsw || op == OP_fnstenv || op == OP_fldenv ||
op == OP_fwait))
control_instr = true;
if (!control_instr) {
prior_float = get_app_instr_xl8(prev);
break;
}
}
}
}
if (prior_float != NULL) {
/* We can link this */
exit_is_normal = true;
STATS_INC(float_pc_from_cache);
/* Replace the stored code cache pc with the original app pc.
* If the app memory is unwritable, instr would have already crashed.
*/
if (op == OP_fnsave || op == OP_fnstenv) {
opnd_set_disp(&memop, opnd_get_disp(memop) + FNSAVE_PC_OFFS);
opnd_set_size(&memop, OPSZ_4);
PRE(ilist, next_instr,
INSTR_CREATE_mov_st(dcontext, memop,
OPND_CREATE_INT32((int)(ptr_int_t)prior_float)));
} else if (op == OP_fxsave32) {
opnd_set_disp(&memop, opnd_get_disp(memop) + FXSAVE_PC_OFFS);
opnd_set_size(&memop, OPSZ_4);
PRE(ilist, next_instr,
INSTR_CREATE_mov_st(dcontext, memop,
OPND_CREATE_INT32((int)(ptr_int_t)prior_float)));
} else if (op == OP_fxsave64) {
opnd_set_disp(&memop, opnd_get_disp(memop) + FXSAVE_PC_OFFS);
opnd_set_size(&memop, OPSZ_8);
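            /* There is no 64-bit store-immediate on x86 (xref PR 244741 above), so
             * let insert_mov_immed_ptrsz construct the 8-byte store of the app pc.
             */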
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)prior_float, memop, ilist,
next_instr, NULL, NULL);
} else
ASSERT_NOT_REACHED();
} else if (!DYNAMO_OPTION(translate_fpu_pc)) {
/* We only support translating when inlined.
* XXX: we can't recover the loss of coarse-grained: we live with that.
*/
exit_is_normal = true;
ASSERT_CURIOSITY(!TEST(FRAG_CANNOT_BE_TRACE, *flags) ||
/* i#1562: it could be marked as no-trace for other reasons */
TEST(FRAG_SELFMOD_SANDBOXED, *flags));
} else {
int reason = 0;
CLIENT_ASSERT(!TEST(FRAG_IS_TRACE, *flags),
"removing an FPU instr in a trace with an FPU state save "
"is not supported");
switch (op) {
case OP_fnsave:
case OP_fnstenv: reason = EXIT_REASON_FLOAT_PC_FNSAVE; break;
case OP_fxsave32: reason = EXIT_REASON_FLOAT_PC_FXSAVE; break;
case OP_fxsave64: reason = EXIT_REASON_FLOAT_PC_FXSAVE64; break;
case OP_xsave32:
case OP_xsavec32:
case OP_xsaveopt32: reason = EXIT_REASON_FLOAT_PC_XSAVE; break;
case OP_xsave64:
case OP_xsavec64:
case OP_xsaveopt64: reason = EXIT_REASON_FLOAT_PC_XSAVE64; break;
default: ASSERT_NOT_REACHED();
}
if (DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, *flags)) {
insert_shared_get_dcontext(dcontext, ilist, instr, true /*save_xdi*/);
PRE(ilist, instr,
INSTR_CREATE_mov_st(
dcontext,
opnd_create_dcontext_field_via_reg_sz(dcontext, REG_NULL /*default*/,
EXIT_REASON_OFFSET, OPSZ_2),
OPND_CREATE_INT16(reason)));
} else {
PRE(ilist, instr,
instr_create_save_immed16_to_dcontext(dcontext, reason,
EXIT_REASON_OFFSET));
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, REG_XDI, DCONTEXT_BASE_SPILL_SLOT));
}
/* At this point, xdi is spilled into DCONTEXT_BASE_SPILL_SLOT */
/* We pass the address in the xbx tls slot, which is untouched by fcache_return.
*
* XXX: handle far refs! Xref drutil_insert_get_mem_addr(), and sandbox_write()
* hitting this same issue.
*/
ASSERT_CURIOSITY(!opnd_is_far_memory_reference(memop));
if (opnd_is_base_disp(memop)) {
opnd_set_size(&memop, OPSZ_lea);
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XDI), memop));
} else {
ASSERT(opnd_is_abs_addr(memop) IF_X64(|| opnd_is_rel_addr(memop)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDI),
OPND_CREATE_INTPTR(opnd_get_addr(memop))));
}
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, REG_XDI, FLOAT_PC_STATE_SLOT));
/* Restore app %xdi */
if (TEST(FRAG_SHARED, *flags))
insert_shared_restore_dcontext_reg(dcontext, ilist, instr);
else {
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext, REG_XDI,
DCONTEXT_BASE_SPILL_SLOT));
}
}
if (exit_is_normal && DYNAMO_OPTION(translate_fpu_pc)) {
instr_t *exit_jmp = next_instr;
while (exit_jmp != NULL && !instr_is_exit_cti(exit_jmp))
exit_jmp = instr_get_next(next_instr);
ASSERT(exit_jmp != NULL);
ASSERT(instr_branch_special_exit(exit_jmp));
instr_branch_set_special_exit(exit_jmp, false);
/* XXX: there could be some other reason this was marked
* cannot-be-trace that we're undoing here...
*/
if (TEST(FRAG_CANNOT_BE_TRACE, *flags))
*flags &= ~FRAG_CANNOT_BE_TRACE;
}
}
/***************************************************************************
* CPUID FOOLING
*/
# ifdef FOOL_CPUID
/* values returned by cpuid for Mobile Pentium MMX processor (family 5, model 8)
* minus mmx (==0x00800000 in CPUID_1_EDX)
* FIXME: change model number to a Pentium w/o MMX!
*/
# define CPUID_0_EAX 0x00000001
# define CPUID_0_EBX 0x756e6547
# define CPUID_0_ECX 0x6c65746e
# define CPUID_0_EDX 0x49656e69
/* extended family, extended model, type, family, model, stepping id: */
/* 20:27, 16:19, 12:13, 8:11, 4:7, 0:3 */
# define CPUID_1_EAX 0x00000581
# define CPUID_1_EBX 0x00000000
# define CPUID_1_ECX 0x00000000
# define CPUID_1_EDX 0x000001bf
static void
mangle_cpuid(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
/* assumption: input value is put in eax on prev instr, or
* on instr prior to that and prev is an inc instr.
* alternative is to insert conditional branch...and save eflags, etc.
*/
instr_t *prev = instr_get_prev(instr);
opnd_t op;
int input, out_eax, out_ebx, out_ecx, out_edx;
LOG(THREAD, LOG_INTERP, 1, "fooling cpuid instruction!\n");
ASSERT(prev != NULL);
prev = instr_get_prev_expanded(dcontext, ilist, instr);
instr_decode(dcontext, instr);
if (!instr_valid(instr))
goto cpuid_give_up;
loginst(dcontext, 2, prev, "prior to cpuid");
/* FIXME: maybe should insert code to dispatch on eax, rather than
* this hack, which is based on photoshop, which either does
* "xor eax,eax" or "xor eax,eax; inc eax"
*/
if (!instr_is_mov_constant(prev, &input)) {
/* we only allow inc here */
if (instr_get_opcode(prev) != OP_inc)
goto cpuid_give_up;
op = instr_get_dst(prev, 0);
if (!opnd_is_reg(op) || opnd_get_reg(op) != REG_EAX)
goto cpuid_give_up;
/* now check instr before inc */
prev = instr_get_prev(prev);
if (!instr_is_mov_constant(prev, &input) || input != 0)
goto cpuid_give_up;
input = 1;
/* now check that mov 0 is into eax */
}
if (instr_num_dsts(prev) == 0)
goto cpuid_give_up;
op = instr_get_dst(prev, 0);
if (!opnd_is_reg(op) || opnd_get_reg(op) != REG_EAX)
goto cpuid_give_up;
if (input == 0) {
out_eax = CPUID_0_EAX;
out_ebx = CPUID_0_EBX;
out_ecx = CPUID_0_ECX;
out_edx = CPUID_0_EDX;
} else {
/* 1 or anything higher all return same info */
out_eax = CPUID_1_EAX;
out_ebx = CPUID_1_EBX;
out_ecx = CPUID_1_ECX;
out_edx = CPUID_1_EDX;
}
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EAX),
OPND_CREATE_INT32(out_eax)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EBX),
OPND_CREATE_INT32(out_ebx)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_ECX),
OPND_CREATE_INT32(out_ecx)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EDX),
OPND_CREATE_INT32(out_edx)));
/* destroy the cpuid instruction */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return;
cpuid_give_up:
LOG(THREAD, LOG_INTERP, 1, "\tcpuid fool: giving up\n");
return;
}
# endif /* FOOL_CPUID */
void
mangle_exit_cti_prefixes(dcontext_t *dcontext, instr_t *instr)
{
uint prefixes = instr_get_prefixes(instr);
if (prefixes != 0) {
bool remove = false;
/* Case 8738: while for transparency it would be best to maintain all
* prefixes, our patching and other routines make assumptions about
* the length of exit ctis. Plus our elision removes the whole
* instr in any case.
*/
if (instr_is_cbr(instr)) {
if (TESTANY(~(PREFIX_JCC_TAKEN | PREFIX_JCC_NOT_TAKEN), prefixes)) {
remove = true;
prefixes &= (PREFIX_JCC_TAKEN | PREFIX_JCC_NOT_TAKEN);
}
} else {
/* prefixes on ubr or mbr should be nops and for ubr will mess up
* our size assumptions so drop them (i#435)
*/
remove = true;
prefixes = 0;
}
if (remove) {
LOG(THREAD, LOG_INTERP, 4,
"\tremoving unknown prefixes " PFX " from " PFX "\n", prefixes,
instr_get_raw_bits(instr));
ASSERT(instr_operands_valid(instr)); /* ensure will encode w/o raw bits */
instr_set_prefixes(instr, prefixes);
}
} else if ((instr_get_opcode(instr) == OP_jmp &&
instr_length(dcontext, instr) > JMP_LONG_LENGTH) ||
(instr_is_cbr(instr) && instr_length(dcontext, instr) > CBR_LONG_LENGTH)) {
/* i#1988: remove MPX prefixes as they mess up our nop padding.
* i#1312 covers marking as actual prefixes, and we should keep them.
*/
LOG(THREAD, LOG_INTERP, 4, "\tremoving unknown jmp prefixes from " PFX "\n",
instr_get_raw_bits(instr));
instr_set_raw_bits_valid(instr, false);
}
}
# ifdef X64
/* PR 215397: re-relativize rip-relative data addresses */
/* Should return NULL if it destroys "instr". We don't support both destroying
* (done only for x86: i#393) and changing next_instr (done only for ARM).
*/
instr_t *
mangle_rel_addr(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
uint opc = instr_get_opcode(instr);
app_pc tgt;
opnd_t dst, src;
ASSERT(instr_has_rel_addr_reference(instr));
instr_get_rel_addr_target(instr, &tgt);
STATS_INC(rip_rel_instrs);
# ifdef RCT_IND_BRANCH
if (TEST(OPTION_ENABLED, DYNAMO_OPTION(rct_ind_call)) ||
TEST(OPTION_ENABLED, DYNAMO_OPTION(rct_ind_jump))) {
/* PR 215408: record addresses taken via rip-relative instrs */
rct_add_rip_rel_addr(dcontext, tgt _IF_DEBUG(instr_get_translation(instr)));
}
# endif
if (opc == OP_lea) {
/* segment overrides are ignored on lea */
opnd_t immed;
dst = instr_get_dst(instr, 0);
src = instr_get_src(instr, 0);
ASSERT(opnd_is_reg(dst));
ASSERT(opnd_is_rel_addr(src));
ASSERT(opnd_get_addr(src) == tgt);
/* Replace w/ an absolute immed of the target app address, following Intel
* Table 3-59 "64-bit Mode LEA Operation with Address and Operand Size
* Attributes" */
/* FIXME PR 253446: optimization: we could leave this as rip-rel if it
* still reaches from the code cache. */
if (reg_get_size(opnd_get_reg(dst)) == OPSZ_8) {
/* PR 253327: there is no explicit addr32 marker; we assume
* that decode or the user already zeroed out the top bits
* if there was an addr32 prefix byte or the user wants
* that effect */
immed = OPND_CREATE_INTPTR((ptr_int_t)tgt);
} else if (reg_get_size(opnd_get_reg(dst)) == OPSZ_4)
immed = OPND_CREATE_INT32((int)(ptr_int_t)tgt);
else {
ASSERT(reg_get_size(opnd_get_reg(dst)) == OPSZ_2);
immed = OPND_CREATE_INT16((short)(ptr_int_t)tgt);
}
PRE(ilist, instr, INSTR_CREATE_mov_imm(dcontext, dst, immed));
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
STATS_INC(rip_rel_lea);
return NULL; /* == destroyed instr */
} else {
/* PR 251479 will automatically re-relativize if it reaches,
* but if it doesn't we need to handle that here (since that
* involves an encoding length change, which complicates many
* use cases if done at instr encode time).
* We don't yet know exactly where we're going to encode this bb,
* so we're conservative and check for all reachability from our
* heap (assumed to be a single heap: xref PR 215395, and xref
         * potential secondary code caches PR 253446).
*/
if (!rel32_reachable_from_vmcode(tgt)) {
int si = -1, di = -1;
opnd_t relop, newop;
bool spill = true;
/* FIXME PR 253446: for mbr, should share the xcx spill */
reg_id_t scratch_reg = REG_XAX;
si = instr_get_rel_addr_src_idx(instr);
di = instr_get_rel_addr_dst_idx(instr);
if (si >= 0) {
relop = instr_get_src(instr, si);
ASSERT(di < 0 || opnd_same(relop, instr_get_dst(instr, di)));
/* If it's a load (OP_mov_ld, or OP_movzx, etc.), use dead reg */
if (instr_num_srcs(instr) == 1 && /* src is the rip-rel opnd */
instr_num_dsts(instr) == 1 && /* only one dest: a register */
opnd_is_reg(instr_get_dst(instr, 0)) && !instr_is_predicated(instr)) {
opnd_size_t sz = opnd_get_size(instr_get_dst(instr, 0));
reg_id_t reg = opnd_get_reg(instr_get_dst(instr, 0));
/* if target is 16 or 8 bit sub-register the whole reg is not dead
* (for 32-bit, top 32 bits are cleared) */
if (reg_is_gpr(reg) && (reg_is_32bit(reg) || reg_is_64bit(reg))) {
spill = false;
scratch_reg = opnd_get_reg(instr_get_dst(instr, 0));
if (sz == OPSZ_4)
scratch_reg = reg_32_to_64(scratch_reg);
/* we checked all opnds: should not read reg */
ASSERT(
!instr_reads_from_reg(instr, scratch_reg, DR_QUERY_DEFAULT));
STATS_INC(rip_rel_unreachable_nospill);
}
}
} else {
relop = instr_get_dst(instr, di);
}
/* PR 263369: we can't just look for instr_reads_from_reg here since
* our no-spill optimization above may miss some writes.
*/
if (spill && instr_uses_reg(instr, scratch_reg)) {
/* mbr (for which we'll use xcx once we optimize) should not
* get here: can't use registers (except xsp) */
ASSERT(scratch_reg == REG_XAX);
do {
scratch_reg++;
ASSERT(scratch_reg <= REG_STOP_64);
} while (instr_uses_reg(instr, scratch_reg));
}
ASSERT(!instr_reads_from_reg(instr, scratch_reg, DR_QUERY_DEFAULT));
ASSERT(!spill || !instr_writes_to_reg(instr, scratch_reg, DR_QUERY_DEFAULT));
/* XXX PR 253446: Optimize by looking ahead for dead registers, and
* sharing single spill across whole bb, or possibly building local code
* cache to avoid unreachability: all depending on how many rip-rel
* instrs we see. We'll watch the stats.
*/
if (spill) {
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, 0, scratch_reg, MANGLE_RIPREL_SPILL_SLOT,
XAX_OFFSET));
}
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(scratch_reg),
OPND_CREATE_INTPTR((ptr_int_t)tgt)));
newop = opnd_create_far_base_disp(opnd_get_segment(relop), scratch_reg,
REG_NULL, 0, 0, opnd_get_size(relop));
if (si >= 0)
instr_set_src(instr, si, newop);
if (di >= 0)
instr_set_dst(instr, di, newop);
/* we need the whole spill...restore region to all be marked mangle */
instr_set_our_mangling(instr, true);
if (spill) {
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, scratch_reg,
MANGLE_RIPREL_SPILL_SLOT));
}
STATS_INC(rip_rel_unreachable);
}
}
return next_instr;
}
# endif
/***************************************************************************
* Reference with segment register (fs/gs)
*/
# ifdef UNIX
static int
instr_get_seg_ref_dst_idx(instr_t *instr)
{
int i;
opnd_t opnd;
if (!instr_valid(instr))
return -1;
/* must go to level 3 operands */
for (i = 0; i < instr_num_dsts(instr); i++) {
opnd = instr_get_dst(instr, i);
if (opnd_is_far_base_disp(opnd) &&
(opnd_get_segment(opnd) == SEG_GS || opnd_get_segment(opnd) == SEG_FS))
return i;
}
return -1;
}
static int
instr_get_seg_ref_src_idx(instr_t *instr)
{
int i;
opnd_t opnd;
if (!instr_valid(instr))
return -1;
/* must go to level 3 operands */
for (i = 0; i < instr_num_srcs(instr); i++) {
opnd = instr_get_src(instr, i);
if (opnd_is_far_base_disp(opnd) &&
(opnd_get_segment(opnd) == SEG_GS || opnd_get_segment(opnd) == SEG_FS))
return i;
}
return -1;
}
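/* TLS spill slots for the scratch registers XAX..XBX, indexed by (reg - REG_XAX). */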
static ushort tls_slots[4] = { TLS_XAX_SLOT, TLS_XCX_SLOT, TLS_XDX_SLOT, TLS_XBX_SLOT };
/* mangle the instruction OP_mov_seg, i.e. the instruction that
 * reads/updates the segment register.
*/
void
mangle_mov_seg(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
reg_id_t seg;
opnd_t opnd, dst;
opnd_size_t dst_sz;
ASSERT(instr_get_opcode(instr) == OP_mov_seg);
ASSERT(instr_num_srcs(instr) == 1);
ASSERT(instr_num_dsts(instr) == 1);
STATS_INC(app_mov_seg_mangled);
    /* For an update of the segment register, we simply change the instr to a nop,
     * because we update the register when dynamorio enters the code cache to
     * execute this basic block.
     */
dst = instr_get_dst(instr, 0);
if (opnd_is_reg(dst) && reg_is_segment(opnd_get_reg(dst))) {
app_pc xl8;
seg = opnd_get_reg(dst);
# ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return;
# endif
/* must use the original instr, which might be used by caller */
xl8 = get_app_instr_xl8(instr);
instr_reuse(dcontext, instr);
instr_set_opcode(instr, OP_nop);
instr_set_num_opnds(dcontext, instr, 0, 0);
instr_set_translation(instr, xl8);
return;
}
/* for read seg, we mangle it */
opnd = instr_get_src(instr, 0);
ASSERT(opnd_is_reg(opnd));
seg = opnd_get_reg(opnd);
ASSERT(reg_is_segment(seg));
if (seg != SEG_FS && seg != SEG_GS)
return;
# ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return;
# endif
/* There are two possible mov_seg instructions:
* 8C/r MOV r/m16,Sreg Move segment register to r/m16
* REX.W + 8C/r MOV r/m64,Sreg Move zero extended 16-bit segment
* register to r/m64
     * Note: in 32-bit mode, the assembler may insert the 16-bit operand-size
* prefix with this instruction.
*/
/* we cannot replace the instruction but only change it. */
dst = instr_get_dst(instr, 0);
dst_sz = opnd_get_size(dst);
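    /* The app's fs/gs selector value lives in one of our TLS slots; redirect the
     * read there (OPSZ_2 since a segment selector is 16 bits wide).
     */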
opnd =
opnd_create_sized_tls_slot(os_tls_offset(os_get_app_tls_reg_offset(seg)), OPSZ_2);
if (opnd_is_reg(dst)) { /* dst is a register */
/* mov %gs:off => reg */
instr_set_src(instr, 0, opnd);
instr_set_opcode(instr, OP_mov_ld);
if (dst_sz != OPSZ_2)
instr_set_opcode(instr, OP_movzx);
    } else { /* dst is memory, need to steal a register. */
reg_id_t reg;
instr_t *ti;
for (reg = REG_XAX; reg < REG_XBX; reg++) {
if (!instr_uses_reg(instr, reg))
break;
}
        /* We need to save the register to the corresponding slot for a correct
         * restore, so only use the first four registers.
         */
ASSERT(reg <= REG_XBX);
/* save reg */
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, reg, tls_slots[reg - REG_XAX]));
/* restore reg */
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, reg, tls_slots[reg - REG_XAX]));
switch (dst_sz) {
case OPSZ_8: IF_NOT_X64(ASSERT(false);) break;
case OPSZ_4: IF_X64(reg = reg_64_to_32(reg);) break;
case OPSZ_2:
IF_X64(reg = reg_64_to_32(reg);)
reg = reg_32_to_16(reg);
break;
default: ASSERT(false);
}
/* mov %gs:off => reg */
ti = INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(reg), opnd);
if (dst_sz != OPSZ_2)
instr_set_opcode(ti, OP_movzx);
PRE(ilist, instr, ti);
/* change mov_seg to mov_st: mov reg => [mem] */
instr_set_src(instr, 0, opnd_create_reg(reg));
instr_set_opcode(instr, OP_mov_st);
}
}
/* mangle the instruction that references memory via a segment register */
void
mangle_seg_ref(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
int si = -1, di = -1;
opnd_t segop, newop;
bool spill = true;
reg_id_t scratch_reg = REG_XAX, seg = REG_NULL;
/* exit cti won't be seg ref */
if (instr_is_exit_cti(instr))
return;
    /* mbr will be handled separately */
if (instr_is_mbr(instr))
return;
if (instr_get_opcode(instr) == OP_lea)
return;
    /* XXX: maybe using decode_cti and then a check on the prefix could be
     * more efficient as it only examines a few bytes and avoids fully decoding
     * the instruction. For simplicity, we examine every operand instead.
*/
/* 1. get ref opnd */
si = instr_get_seg_ref_src_idx(instr);
di = instr_get_seg_ref_dst_idx(instr);
if (si < 0 && di < 0)
return;
if (si >= 0) {
segop = instr_get_src(instr, si);
ASSERT(di < 0 || opnd_same(segop, instr_get_dst(instr, di)));
} else {
segop = instr_get_dst(instr, di);
}
seg = opnd_get_segment(segop);
if (seg != SEG_GS && seg != SEG_FS)
return;
# ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return;
# endif
STATS_INC(app_seg_refs_mangled);
DOLOG(3, LOG_INTERP,
{ loginst(dcontext, 3, instr, "reference with fs/gs segment"); });
/* 2. decide the scratch reg */
/* Opt: if it's a load (OP_mov_ld, or OP_movzx, etc.), use dead reg */
if (si >= 0 && instr_num_srcs(instr) == 1 && /* src is the seg ref opnd */
instr_num_dsts(instr) == 1 && /* only one dest: a register */
opnd_is_reg(instr_get_dst(instr, 0)) && !instr_is_predicated(instr)) {
reg_id_t reg = opnd_get_reg(instr_get_dst(instr, 0));
/* if target is 16 or 8 bit sub-register the whole reg is not dead
* (for 32-bit, top 32 bits are cleared) */
if (reg_is_gpr(reg) && (reg_is_32bit(reg) || reg_is_64bit(reg)) &&
/* mov [%fs:%xax] => %xax */
!instr_reads_from_reg(instr, reg, DR_QUERY_DEFAULT)) {
spill = false;
scratch_reg = reg;
# ifdef X64
if (opnd_get_size(instr_get_dst(instr, 0)) == OPSZ_4)
scratch_reg = reg_32_to_64(reg);
# endif
}
}
if (spill) {
        /* We pick a scratch register from XAX, XBX, XCX, or XDX,
         * which all have direct TLS slots.
         */
for (scratch_reg = REG_XAX; scratch_reg <= REG_XBX; scratch_reg++) {
/* the register must not be used by the instr, either read or write,
* because we will mangle it when executing the instr (no read from),
* and restore it after that instr (no write to).
*/
if (!instr_uses_reg(instr, scratch_reg))
break;
}
ASSERT(scratch_reg <= REG_XBX);
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, scratch_reg,
tls_slots[scratch_reg - REG_XAX]));
}
newop = mangle_seg_ref_opnd(dcontext, ilist, instr, segop, scratch_reg);
if (si >= 0)
instr_set_src(instr, si, newop);
if (di >= 0)
instr_set_dst(instr, di, newop);
/* we need the whole spill...restore region to all be marked mangle */
instr_set_our_mangling(instr, true);
/* FIXME: i#107 we should check the bound and raise signal if out of bound. */
DOLOG(3, LOG_INTERP, { loginst(dcontext, 3, instr, "re-wrote app tls reference"); });
if (spill) {
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, scratch_reg,
tls_slots[scratch_reg - REG_XAX]));
}
}
# endif /* UNIX */
# ifdef ANNOTATIONS
/***************************************************************************
* DR and Valgrind annotations
*/
void
mangle_annotation_helper(dcontext_t *dcontext, instr_t *label, instrlist_t *ilist)
{
dr_instr_label_data_t *label_data = instr_get_label_data_area(label);
dr_annotation_handler_t *handler = GET_ANNOTATION_HANDLER(label_data);
dr_annotation_receiver_t *receiver = handler->receiver_list;
opnd_t *args = NULL;
ASSERT(handler->type == DR_ANNOTATION_HANDLER_CALL);
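    /* Insert one clean call per registered receiver, giving each its own copy of
     * the handler's argument operands.
     */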
while (receiver != NULL) {
if (handler->num_args != 0) {
args = HEAP_ARRAY_ALLOC(dcontext, opnd_t, handler->num_args, ACCT_CLEANCALL,
UNPROTECTED);
memcpy(args, handler->args, sizeof(opnd_t) * handler->num_args);
}
dr_insert_clean_call_ex_varg(dcontext, ilist, label,
receiver->instrumentation.callback,
receiver->save_fpstate ? DR_CLEANCALL_SAVE_FLOAT : 0,
handler->num_args, args);
if (handler->num_args != 0) {
HEAP_ARRAY_FREE(dcontext, args, opnd_t, handler->num_args, ACCT_CLEANCALL,
UNPROTECTED);
}
receiver = receiver->next;
}
}
# endif
/* END OF CONTROL-FLOW MANGLING ROUTINES
*###########################################################################
*###########################################################################
*/
/* SELF-MODIFYING-CODE SANDBOXING
*
* When we detect it, we take an exit that targets our own routine
* fragment_self_write. Dispatch checks for that target and if it finds it,
* it calls that routine, so don't worry about building a bb for it.
* Returns false if the bb has invalid instrs in the middle and it should
* be rebuilt from scratch.
*/
# undef SAVE_TO_DC_OR_TLS
# undef RESTORE_FROM_DC_OR_TLS
/* PR 244737: x64 uses tls to avoid reachability issues w/ absolute addresses */
# ifdef X64
# define SAVE_TO_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_save_to_tls((dc), (reg), (tls_offs))
# define RESTORE_FROM_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_restore_from_tls((dc), (reg), (tls_offs))
# else
# define SAVE_TO_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_save_to_dcontext((dc), (reg), (dc_offs))
# define RESTORE_FROM_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_restore_from_dcontext((dc), (reg), (dc_offs))
# endif
static void
sandbox_rep_instr(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr, instr_t *next,
app_pc start_pc, app_pc end_pc /* end is open */)
{
/* put checks before instr, set some reg as a flag, act on it
* after instr (even if overwrite self will execute rep to completion)
* want to read DF to find direction (0=inc xsi/xdi, 1=dec),
* but only way to read is to do a pushf!
* Solution: if cld or std right before rep instr, use that info,
* otherwise check for BOTH directions!
* xcx is a pre-check, xsi/xdi are inc/dec after memory op, so
* xdi+xcx*opndsize == instr of NEXT write, so open-ended there:
* if DF==0:
* if (xdi < end_pc && xdi+xcx*opndsize > start_pc) => self-write
* if DF==1:
     *   if (xdi > start_pc && xdi-xcx*opndsize < end_pc) => self-write
* both:
* if (xdi-xcx*opndsize < end_pc && xdi+xcx*opndsize > start_pc) => self-write
* opndsize is 1,2, or 4 => use lea for mul
* lea (xdi,xcx,opndsize),xcx
*
* save flags and xax
* save xbx
* lea (xdi,xcx,opndsize),xbx
* if x64 && (start_pc > 4GB || end_pc > 4GB): save xdx
* if x64 && start_pc > 4GB: mov start_pc, xdx
* cmp xbx, IF_X64_>4GB_ELSE(xdx, start_pc)
* mov $0,xbx # for if ok
* jle ok # open b/c address of next rep write
* lea (,xcx,opndsize),xbx
* neg xbx # sub does dst - src
* add xdi,xbx
* if x64 && end_pc > 4GB: mov end_pc, xdx
* cmp xbx, IF_X64_>4GB_ELSE(xdx, end_pc)
* mov $0,xbx # for if ok
* jge ok # end is open
* mov $1,xbx
* ok:
* restore flags and xax (xax used by stos)
* if x64 && (start_pc > 4GB || end_pc > 4GB): restore xdx
* <rep instr> # doesn't use xbx
* (PR 267764/i#398: we special-case restore xbx on cxt xl8 if this instr faults)
* mov xbx,xcx # we can use xcx, it's dead since 0 after rep
* restore xbx
* jecxz ok2 # if xbx was 1 we'll fall through and exit
* mov $0,xcx
* jmp <instr after write, flag as INSTR_BRANCH_SPECIAL_EXIT>
* ok2:
* <label> # ok2 can't == next, b/c next may be ind br -> mangled w/ instrs
* # inserted before it, so jecxz would target too far
*/
instr_t *ok = INSTR_CREATE_label(dcontext);
instr_t *ok2 = INSTR_CREATE_label(dcontext);
instr_t *jmp;
app_pc after_write;
uint opndsize = opnd_size_in_bytes(opnd_get_size(instr_get_dst(instr, 0)));
uint flags =
instr_eflags_to_fragment_eflags(forward_eflags_analysis(dcontext, ilist, next));
bool use_tls = IF_X64_ELSE(true, false);
IF_X64(bool x86_to_x64_ibl_opt = DYNAMO_OPTION(x86_to_x64_ibl_opt);)
instr_t *next_app = next;
DOLOG(3, LOG_INTERP, { loginst(dcontext, 3, instr, "writes memory"); });
    ASSERT(!instr_is_call_indirect(instr)); /* FIXME: can you have REP on CALL's? */
/* skip meta instrs to find next app instr (xref PR 472190) */
while (next_app != NULL && instr_is_meta(next_app))
next_app = instr_get_next(next_app);
if (next_app != NULL) {
/* client may have inserted non-meta instrs, so use translation first
* (xref PR 472190)
*/
if (instr_get_app_pc(next_app) != NULL)
after_write = instr_get_app_pc(next_app);
else if (!instr_raw_bits_valid(next_app)) {
/* next must be the final jmp! */
ASSERT(instr_is_ubr(next_app) && instr_get_next(next_app) == NULL);
after_write = opnd_get_pc(instr_get_target(next_app));
} else
after_write = instr_get_raw_bits(next_app);
} else {
after_write = end_pc;
}
insert_save_eflags(dcontext, ilist, instr, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_XDI, REG_XCX, opndsize, 0, OPSZ_lea)));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
}
if ((ptr_uint_t)start_pc > UINT_MAX) {
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDX),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XDX)));
} else {
# endif
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)start_pc)));
# ifdef X64
}
# endif
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX), OPND_CREATE_INT32(0)));
PRE(ilist, instr, INSTR_CREATE_jcc(dcontext, OP_jle, opnd_create_instr(ok)));
PRE(ilist, instr,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_NULL, REG_XCX, opndsize, 0, OPSZ_lea)));
PRE(ilist, instr, INSTR_CREATE_neg(dcontext, opnd_create_reg(REG_XBX)));
PRE(ilist, instr,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_XBX), opnd_create_reg(REG_XDI)));
# ifdef X64
if ((ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDX),
OPND_CREATE_INTPTR(end_pc)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XDX)));
} else {
# endif
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)end_pc)));
# ifdef X64
}
# endif
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX), OPND_CREATE_INT32(0)));
PRE(ilist, instr, INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_instr(ok)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX), OPND_CREATE_INT32(1)));
PRE(ilist, instr, ok);
insert_restore_eflags(dcontext, ilist, instr, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
}
# endif
/* instr goes here */
PRE(ilist, next,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XCX),
opnd_create_reg(REG_XBX)));
PRE(ilist, next, RESTORE_FROM_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
PRE(ilist, next, INSTR_CREATE_jecxz(dcontext, opnd_create_instr(ok2)));
PRE(ilist, next,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INT32(0))); /* on x64 top 32 bits zeroed */
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(after_write));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, next, jmp);
PRE(ilist, next, ok2);
}
static void
sandbox_write(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr, instr_t *next,
opnd_t op, app_pc start_pc, app_pc end_pc /* end is open */)
{
/* can only test for equality w/o modifying flags, so save them
* if (addr < end_pc && addr+opndsize > start_pc) => self-write
* <write memory>
* save xbx
* lea memory,xbx
* save flags and xax # after lea of memory in case memory includes xax
* if x64 && (start_pc > 4GB || end_pc > 4GB): save xcx
* if x64 && end_pc > 4GB: mov end_pc, xcx
* cmp xbx, IF_X64_>4GB_ELSE(xcx, end_pc)
* jge ok # end is open
* lea opndsize(xbx),xbx
* if x64 && start_pc > 4GB: mov start_pc, xcx
* cmp xbx, IF_X64_>4GB_ELSE(xcx, start_pc)
* jle ok # open since added size
* restore flags (using xbx) and xax
* restore xbx
* if x64 && (start_pc > 4GB || end_pc > 4GB): restore xcx
* jmp <instr after write, flag as INSTR_BRANCH_SPECIAL_EXIT>
* ok:
* restore flags and xax
* restore xbx
* if x64 && (start_pc > 4GB || end_pc > 4GB): restore xcx
*/
instr_t *ok = INSTR_CREATE_label(dcontext), *jmp;
app_pc after_write = NULL;
uint opndsize = opnd_size_in_bytes(opnd_get_size(op));
uint flags =
instr_eflags_to_fragment_eflags(forward_eflags_analysis(dcontext, ilist, next));
bool use_tls = IF_X64_ELSE(true, false);
IF_X64(bool x86_to_x64_ibl_opt = DYNAMO_OPTION(x86_to_x64_ibl_opt);)
instr_t *next_app = next;
instr_t *get_addr_at = next;
int opcode = instr_get_opcode(instr);
DOLOG(3, LOG_INTERP, { loginst(dcontext, 3, instr, "writes memory"); });
/* skip meta instrs to find next app instr (xref PR 472190) */
while (next_app != NULL && instr_is_meta(next_app))
next_app = instr_get_next(next_app);
if (next_app != NULL) {
/* client may have inserted non-meta instrs, so use translation first
* (xref PR 472190)
*/
if (instr_get_app_pc(next_app) != NULL)
after_write = instr_get_app_pc(next_app);
else if (!instr_raw_bits_valid(next_app)) {
/* next must be the final artificially added jmp! */
ASSERT(instr_is_ubr(next_app) && instr_get_next(next_app) == NULL);
            /* For sure this is the last jmp out, but it doesn't have to be a
             * direct jmp: instead it could be the exit branch we add for an
             * indirect call, which is the only indirect branch that writes to
             * memory.  A CALL* already means that we're leaving the block, so it
             * cannot be a selfmod instruction even though it writes to memory.
*/
DOLOG(4, LOG_INTERP, { loginst(dcontext, 4, next_app, "next app instr"); });
after_write = opnd_get_pc(instr_get_target(next_app));
LOG(THREAD, LOG_INTERP, 4, "after_write = " PFX " next should be final jmp\n",
after_write);
} else
after_write = instr_get_raw_bits(next_app);
} else {
ASSERT_NOT_TESTED();
after_write = end_pc;
}
if (opcode == OP_ins || opcode == OP_movs || opcode == OP_stos) {
/* These instrs modify their own addressing register so we must
* get the address pre-write. None of them touch xbx.
*/
get_addr_at = instr;
ASSERT(!instr_writes_to_reg(instr, REG_XBX, DR_QUERY_DEFAULT) &&
!instr_reads_from_reg(instr, REG_XBX, DR_QUERY_DEFAULT));
}
PRE(ilist, get_addr_at,
SAVE_TO_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
/* XXX: Basically reimplementing drutil_insert_get_mem_addr(). */
/* FIXME i#986: Sandbox far writes. Not a hypothetical problem! NaCl uses
* segments for its x86 sandbox, although they are 0 based with a limit.
* qq.exe has them in sandboxed code.
*/
ASSERT_CURIOSITY(!opnd_is_far_memory_reference(op) ||
/* Standard far refs */
opcode == OP_ins || opcode == OP_movs || opcode == OP_stos);
if (opnd_is_base_disp(op)) {
/* change to OPSZ_lea for lea */
opnd_set_size(&op, OPSZ_lea);
PRE(ilist, get_addr_at, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XBX), op));
if ((opcode == OP_push && opnd_is_base_disp(op) &&
opnd_get_index(op) == DR_REG_NULL &&
reg_to_pointer_sized(opnd_get_base(op)) == DR_REG_XSP) ||
opcode == OP_push_imm || opcode == OP_pushf || opcode == OP_pusha ||
opcode == OP_pop /* pop into stack slot */ || opcode == OP_call ||
opcode == OP_call_ind || opcode == OP_call_far || opcode == OP_call_far_ind) {
/* Undo xsp adjustment made by the instruction itself.
* We could use get_addr_at to acquire the address pre-instruction
* for some of these, but some can read or write ebx.
*/
PRE(ilist, next,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_NULL, REG_XBX, 1,
-opnd_get_disp(op), OPSZ_lea)));
}
} else {
/* handle abs addr pointing within fragment */
/* XXX: Can optimize this by doing address comparison at translation
* time. Might happen frequently if a JIT stores data on the same page
* as its code. For now we hook into existing sandboxing code.
*/
app_pc abs_addr;
ASSERT(opnd_is_abs_addr(op) IF_X64(|| opnd_is_rel_addr(op)));
abs_addr = opnd_get_addr(op);
PRE(ilist, get_addr_at,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INTPTR(abs_addr)));
}
insert_save_eflags(dcontext, ilist, next, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next, SAVE_TO_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
if ((ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(end_pc)));
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XCX)));
} else {
# endif
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)end_pc)));
# ifdef X64
}
# endif
PRE(ilist, next, INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_instr(ok)));
PRE(ilist, next,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_XBX, REG_NULL, 0, opndsize, OPSZ_lea)));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX) {
PRE(ilist, next,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XCX)));
} else {
# endif
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)start_pc)));
# ifdef X64
}
# endif
PRE(ilist, next, INSTR_CREATE_jcc(dcontext, OP_jle, opnd_create_instr(ok)));
insert_restore_eflags(dcontext, ilist, next, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
PRE(ilist, next, RESTORE_FROM_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
# endif
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(after_write));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, next, jmp);
PRE(ilist, next, ok);
insert_restore_eflags(dcontext, ilist, next, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
PRE(ilist, next, RESTORE_FROM_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
# endif
}
static bool
sandbox_top_of_bb_check_s2ro(dcontext_t *dcontext, app_pc start_pc)
{
return (DYNAMO_OPTION(sandbox2ro_threshold) > 0 &&
/* we can't make stack regions ro so don't put in the instrumentation */
!is_address_on_stack(dcontext, start_pc) &&
/* case 9098 we don't want to ever make RO untrackable driver areas */
!is_driver_address(start_pc));
}
static void
sandbox_top_of_bb(dcontext_t *dcontext, instrlist_t *ilist, bool s2ro, uint flags,
app_pc start_pc, app_pc end_pc, /* end is open */
bool for_cache,
/* for obtaining the two patch locations: */
patch_list_t *patchlist, cache_pc *copy_start_loc,
cache_pc *copy_end_loc)
{
/* add check at top of ilist that compares actual app instructions versus
* copy we saved, stored in cache right after fragment itself. leave its
* start address blank here, will be touched up after emitting this ilist.
*
* FIXME case 8165/PR 212600: optimize this: move reg restores to
* custom fcache_return, use cmpsd instead of cmpsb, etc.
*
* if eflags live entering this bb:
* save xax
* lahf
* seto %al
* endif
* if (-sandbox2ro_threshold > 0)
* if x64: save xcx
* incl &vm_area_t->exec_count (for x64, via xcx)
* cmp sandbox2ro_threshold, vm_area_t->exec_count (for x64, via xcx)
* if eflags live entering this bb, or x64:
* jl past_threshold
* if x64: restore xcx
* if eflags live entering this bb:
* jmp restore_eflags_and_exit
* else
* jmp start_pc marked as selfmod exit
* endif
* past_threshold:
* else
* jge start_pc marked as selfmod exit
* endif
* endif
* if (-sandbox2ro_threshold == 0) && !x64)
* save xcx
* endif
* save xsi
* save xdi
* if stats:
* inc num_sandbox_execs stat (for x64, via xsi)
* endif
* mov start_pc,xsi
* mov copy_start_pc,xdi # 1 opcode byte, then offset
* # => patch point 1
* cmpsb
* if copy_size > 1 # not an opt: for correctness: if "repe cmpsb" has xcx==0, it
* # doesn't touch eflags and we treat cmp results as cmpsb results
* jne check_results
* if x64 && start_pc > 4GB
* mov start_pc, xcx
* cmp xsi, xcx
* else
* cmp xsi, start_pc
* endif
* mov copy_size-1, xcx # -1 b/c we already checked 1st byte
* jge forward
* mov copy_end_pc - 1, xdi # -1 b/c it is the end of this basic block
* # => patch point 2
* mov end_pc - 1, xsi
* forward:
* repe cmpsb
* endif # copy_size > 1
* check_results:
* restore xcx
* restore xsi
* restore xdi
* if eflags live:
* je start_bb
* restore_eflags_and_exit:
* add $0x7f,%al
* sahf
* restore xax
* jmp start_pc marked as selfmod exit
* else
* jne start_pc marked as selfmod exit
* endif
* start_bb:
* if eflags live:
* add $0x7f,%al
* sahf
* restore xax
* endif
*/
instr_t *instr, *jmp;
instr_t *restore_eflags_and_exit = NULL;
bool use_tls = IF_X64_ELSE(true, false);
IF_X64(bool x86_to_x64_ibl_opt = DYNAMO_OPTION(x86_to_x64_ibl_opt);)
bool saved_xcx = false;
instr_t *check_results = INSTR_CREATE_label(dcontext);
instr = instrlist_first_expanded(dcontext, ilist);
insert_save_eflags(dcontext, ilist, instr, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
if (s2ro) {
/* It's difficult to use lea/jecxz here as we want to use a shared
* counter but no lock, and thus need a relative comparison, while
* lea/jecxz can only do an exact comparison. We could be exact by
* having a separate counter per (private) fragment but by spilling
* eflags we can inc memory, making the scheme here not inefficient.
*/
uint thresh = DYNAMO_OPTION(sandbox2ro_threshold);
uint *counter;
if (for_cache)
counter = get_selfmod_exec_counter(start_pc);
else {
/* Won't find exec area since not a real fragment (probably
* a recreation post-flush). Won't execute, so NULL is fine.
*/
counter = NULL;
}
# ifdef X64
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
saved_xcx = true;
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(counter)));
PRE(ilist, instr, INSTR_CREATE_inc(dcontext, OPND_CREATE_MEM32(REG_XCX, 0)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, OPND_CREATE_MEM32(REG_XCX, 0),
OPND_CREATE_INT_32OR8((int)thresh)));
# else
PRE(ilist, instr,
INSTR_CREATE_inc(dcontext, OPND_CREATE_ABSMEM(counter, OPSZ_4)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, OPND_CREATE_ABSMEM(counter, OPSZ_4),
OPND_CREATE_INT_32OR8(thresh)));
# endif
if (TEST(FRAG_WRITES_EFLAGS_6, flags) IF_X64(&&false)) {
jmp = INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
} else {
instr_t *past_threshold = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
INSTR_CREATE_jcc_short(dcontext, OP_jl_short,
opnd_create_instr(past_threshold)));
# ifdef X64
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
# endif
if (!TEST(FRAG_WRITES_EFLAGS_6, flags)) {
ASSERT(restore_eflags_and_exit == NULL);
restore_eflags_and_exit = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
INSTR_CREATE_jmp(dcontext,
opnd_create_instr(restore_eflags_and_exit)));
}
# ifdef X64
else {
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
}
# endif
PRE(ilist, instr, past_threshold);
}
}
if (!saved_xcx) {
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XSI, TLS_XBX_SLOT, XSI_OFFSET));
PRE(ilist, instr, SAVE_TO_DC_OR_TLS(dcontext, REG_XDI, TLS_XDX_SLOT, XDI_OFFSET));
DOSTATS({
if (GLOBAL_STATS_ON()) {
/* We only do global inc, not bothering w/ thread-private stats.
* We don't care about races: ballpark figure is good enough.
* We could do a direct inc of memory for 32-bit.
*/
PRE(ilist, instr,
INSTR_CREATE_mov_imm(
dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INTPTR(GLOBAL_STAT_ADDR(num_sandbox_execs))));
PRE(ilist, instr,
INSTR_CREATE_inc(
dcontext,
opnd_create_base_disp(REG_XSI, REG_NULL, 0, 0, OPSZ_STATS)));
}
});
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDI),
/* will become copy start */
OPND_CREATE_INTPTR(start_pc)));
if (patchlist != NULL) {
ASSERT(copy_start_loc != NULL);
add_patch_marker(patchlist, instr_get_prev(instr), PATCH_ASSEMBLE_ABSOLUTE,
-(short)sizeof(cache_pc), (ptr_uint_t *)copy_start_loc);
}
PRE(ilist, instr, INSTR_CREATE_cmps_1(dcontext));
/* For a 1-byte copy size we cannot use "repe cmpsb" as it won't
* touch eflags and we'll treat the cmp results as cmpsb results, which
* doesn't work (cmp will never be equal)
*/
if (end_pc - start_pc > 1) {
instr_t *forward = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
INSTR_CREATE_jcc(dcontext, OP_jne, opnd_create_instr(check_results)));
# ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX) {
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XSI),
opnd_create_reg(REG_XCX)));
} else {
# endif
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INT32((int)(ptr_int_t)start_pc)));
# ifdef X64
}
# endif
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(end_pc - (start_pc + 1))));
        /* i#2155: In the case where the direction flag is set, xsi will be less
         * than start_pc after the cmps, and the jump branch will not be taken.
         */
PRE(ilist, instr, INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_instr(forward)));
        /* i#2155: The immediate value here is only a placeholder,
         * since it will be patched in finalize_selfmod_sandbox.
         */
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDI),
/* will become copy end */
OPND_CREATE_INTPTR(end_pc - 1)));
if (patchlist != NULL) {
ASSERT(copy_end_loc != NULL);
add_patch_marker(patchlist, instr_get_prev(instr), PATCH_ASSEMBLE_ABSOLUTE,
-(short)sizeof(cache_pc), (ptr_uint_t *)copy_end_loc);
}
/* i#2155: The next rep cmps comparison will be done backward,
* and thus it should be started at end_pc - 1
* because current basic block is [start_pc:end_pc-1].
*/
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INTPTR(end_pc - 1)));
PRE(ilist, instr, forward);
PRE(ilist, instr, INSTR_CREATE_rep_cmps_1(dcontext));
}
PRE(ilist, instr, check_results);
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XSI, TLS_XBX_SLOT, XSI_OFFSET));
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XDI, TLS_XDX_SLOT, XDI_OFFSET));
if (!TEST(FRAG_WRITES_EFLAGS_6, flags)) {
instr_t *start_bb = INSTR_CREATE_label(dcontext);
PRE(ilist, instr, INSTR_CREATE_jcc(dcontext, OP_je, opnd_create_instr(start_bb)));
if (restore_eflags_and_exit != NULL) /* somebody needs this label */
PRE(ilist, instr, restore_eflags_and_exit);
insert_restore_eflags(dcontext, ilist, instr, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) &&
x86_to_x64_ibl_opt));
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
PRE(ilist, instr, start_bb);
} else {
jmp = INSTR_CREATE_jcc(dcontext, OP_jne, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
}
insert_restore_eflags(dcontext, ilist, instr, flags, use_tls,
!use_tls _IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
/* fall-through to bb start */
}
/* returns false if failed to add sandboxing b/c of a problematic ilist --
* invalid instrs, elided ctis, etc.
*/
bool
insert_selfmod_sandbox(dcontext_t *dcontext, instrlist_t *ilist, uint flags,
app_pc start_pc, app_pc end_pc, /* end is open */
bool record_translation, bool for_cache)
{
instr_t *instr, *next;
if (!INTERNAL_OPTION(hw_cache_consistency))
return true; /* nothing to do */
/* this code assumes bb covers single, contiguous region */
ASSERT((flags & FRAG_HAS_DIRECT_CTI) == 0);
/* store first instr so loop below will skip top check */
instr = instrlist_first_expanded(dcontext, ilist);
instrlist_set_our_mangling(ilist, true); /* PR 267260 */
if (record_translation) {
/* skip client instrumentation, if any, as is done below */
while (instr != NULL && instr_is_meta(instr))
instr = instr_get_next_expanded(dcontext, ilist, instr);
/* make sure inserted instrs translate to the proper original instr */
ASSERT(instr != NULL && instr_get_translation(instr) != NULL);
instrlist_set_translation_target(ilist, instr_get_translation(instr));
}
sandbox_top_of_bb(dcontext, ilist, sandbox_top_of_bb_check_s2ro(dcontext, start_pc),
flags, start_pc, end_pc, for_cache, NULL, NULL, NULL);
if (INTERNAL_OPTION(sandbox_writes)) {
for (; instr != NULL; instr = next) {
int i, opcode;
opnd_t op;
opcode = instr_get_opcode(instr);
if (!instr_valid(instr)) {
/* invalid instr -- best to truncate block here, easiest way
* to do that and get all flags right is to re-build it,
* but this time we'll use full decode so we'll avoid the discrepancy
* between fast and full decode on invalid instr detection.
*/
if (record_translation)
instrlist_set_translation_target(ilist, NULL);
instrlist_set_our_mangling(ilist, false); /* PR 267260 */
return false;
}
/* don't mangle anything that mangle inserts! */
next = instr_get_next_expanded(dcontext, ilist, instr);
if (instr_is_meta(instr))
continue;
if (record_translation) {
/* make sure inserted instrs translate to the proper original instr */
ASSERT(instr_get_translation(instr) != NULL);
instrlist_set_translation_target(ilist, instr_get_translation(instr));
}
if (opcode == OP_rep_ins || opcode == OP_rep_movs || opcode == OP_rep_stos) {
sandbox_rep_instr(dcontext, ilist, instr, next, start_pc, end_pc);
continue;
}
/* FIXME case 8165: optimize for multiple push/pop */
for (i = 0; i < instr_num_dsts(instr); i++) {
op = instr_get_dst(instr, i);
if (opnd_is_memory_reference(op)) {
/* ignore CALL* since last anyways */
if (instr_is_call_indirect(instr)) {
ASSERT(next != NULL && !instr_raw_bits_valid(next));
/* FIXME case 8165: why do we ever care about the last
* instruction modifying anything?
*/
/* conversion of IAT calls (but not elision)
* transforms this into a direct CALL,
* in that case 'next' is a direct jmp
* fall through, so has no exit flags
*/
ASSERT(EXIT_IS_CALL(instr_exit_branch_type(next)) ||
(DYNAMO_OPTION(IAT_convert) &&
TEST(INSTR_IND_CALL_DIRECT, instr->flags)));
LOG(THREAD, LOG_INTERP, 3,
" ignoring CALL* at end of fragment\n");
/* This test could be done outside of this loop on
* destinations, but since it is rare it is faster
* to do it here. Using continue instead of break in case
* it gets moved out.
*/
continue;
}
if (opnd_is_abs_addr(op) IF_X64(|| opnd_is_rel_addr(op))) {
app_pc abs_addr = opnd_get_addr(op);
uint size = opnd_size_in_bytes(opnd_get_size(op));
if (!POINTER_OVERFLOW_ON_ADD(abs_addr, size) &&
(abs_addr + size < start_pc || abs_addr >= end_pc)) {
/* This is an absolute memory reference that points
* outside the current basic block and doesn't need
* sandboxing.
*/
continue;
}
}
sandbox_write(dcontext, ilist, instr, next, op, start_pc, end_pc);
}
}
}
}
if (record_translation)
instrlist_set_translation_target(ilist, NULL);
instrlist_set_our_mangling(ilist, false); /* PR 267260 */
return true;
}
/* Offsets within selfmod sandbox top-of-bb code that we patch once
* the code is emitted, as the values depend on the emitted address.
* These vary by whether sandbox_top_of_bb_check_s2ro() and whether
* eflags are not written, all written, or just OF is written.
* For the copy_size == 1 variation, we simply ignore the 2nd patch point.
*/
static bool selfmod_s2ro[] = { false, true };
static uint selfmod_eflags[] = { FRAG_WRITES_EFLAGS_6, FRAG_WRITES_EFLAGS_OF, 0 };
# define SELFMOD_NUM_S2RO (sizeof(selfmod_s2ro) / sizeof(selfmod_s2ro[0]))
# define SELFMOD_NUM_EFLAGS (sizeof(selfmod_eflags) / sizeof(selfmod_eflags[0]))
# ifdef X64 /* additional complexity: start_pc > 4GB? */
static app_pc selfmod_gt4G[] = { NULL, (app_pc)(POINTER_MAX - 2) /*so end can be +2*/ };
# define SELFMOD_NUM_GT4G (sizeof(selfmod_gt4G) / sizeof(selfmod_gt4G[0]))
# endif
uint selfmod_copy_start_offs[SELFMOD_NUM_S2RO][SELFMOD_NUM_EFLAGS] IF_X64([
SELFMOD_NUM_GT4G]);
uint selfmod_copy_end_offs[SELFMOD_NUM_S2RO][SELFMOD_NUM_EFLAGS] IF_X64([
SELFMOD_NUM_GT4G]);
void
set_selfmod_sandbox_offsets(dcontext_t *dcontext)
{
int i, j;
# ifdef X64
int k;
# endif
instrlist_t ilist;
patch_list_t patch;
static byte buf[256];
uint len;
/* We assume this is called at init, when .data is +w and we need no
* synch on accessing buf */
ASSERT(!dynamo_initialized);
for (i = 0; i < SELFMOD_NUM_S2RO; i++) {
for (j = 0; j < SELFMOD_NUM_EFLAGS; j++) {
# ifdef X64
for (k = 0; k < SELFMOD_NUM_GT4G; k++) {
# endif
cache_pc start_pc, end_pc;
app_pc app_start;
instr_t *inst;
instrlist_init(&ilist);
/* sandbox_top_of_bb assumes there's an instr there */
instrlist_append(&ilist, INSTR_CREATE_label(dcontext));
init_patch_list(&patch, PATCH_TYPE_ABSOLUTE);
app_start = IF_X64_ELSE(selfmod_gt4G[k], NULL);
sandbox_top_of_bb(dcontext, &ilist, selfmod_s2ro[i], selfmod_eflags[j],
/* we must have a >1-byte region to get
* both patch points */
app_start, app_start + 2, false, &patch, &start_pc,
&end_pc);
/* The exit cti's may not reachably encode (normally
* they'd be mangled away) so we munge them first
*/
for (inst = instrlist_first(&ilist); inst != NULL;
inst = instr_get_next(inst)) {
if (instr_is_exit_cti(inst)) {
instr_set_target(inst, opnd_create_pc(buf));
}
}
len = encode_with_patch_list(dcontext, &patch, &ilist, buf);
ASSERT(len < BUFFER_SIZE_BYTES(buf));
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(start_pc - buf)));
selfmod_copy_start_offs[i][j] IF_X64([k]) = (uint)(start_pc - buf);
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(end_pc - buf)));
selfmod_copy_end_offs[i][j] IF_X64([k]) = (uint)(end_pc - buf);
LOG(THREAD, LOG_EMIT, 3, "selfmod offs %d %d" IF_X64(" %d") ": %u %u\n",
i, j, IF_X64_(k) selfmod_copy_start_offs[i][j] IF_X64([k]),
selfmod_copy_end_offs[i][j] IF_X64([k]));
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
# ifdef X64
}
# endif
}
}
}
void
finalize_selfmod_sandbox(dcontext_t *dcontext, fragment_t *f)
{
cache_pc copy_pc = FRAGMENT_SELFMOD_COPY_PC(f);
byte *pc;
int i, j;
# ifdef X64
int k = ((ptr_uint_t)f->tag) > UINT_MAX ? 1 : 0;
# endif
i = (sandbox_top_of_bb_check_s2ro(dcontext, f->tag)) ? 1 : 0;
j = (TEST(FRAG_WRITES_EFLAGS_6, f->flags)
? 0
: (TEST(FRAG_WRITES_EFLAGS_OF, f->flags) ? 1 : 2));
pc = FCACHE_ENTRY_PC(f) + selfmod_copy_start_offs[i][j] IF_X64([k]);
/* The copy start gets updated after sandbox_top_of_bb. */
*((cache_pc *)pc) = copy_pc;
if (FRAGMENT_SELFMOD_COPY_CODE_SIZE(f) > 1) {
pc = FCACHE_ENTRY_PC(f) + selfmod_copy_end_offs[i][j] IF_X64([k]);
/* i#2155: The copy end gets updated.
* This value will be used in the case where the direction flag is set.
* It will then be the starting point for the backward repe cmps.
*/
*((cache_pc *)pc) = (copy_pc + FRAGMENT_SELFMOD_COPY_CODE_SIZE(f) - 1);
} /* else, no 2nd patch point */
}
#endif /* !STANDALONE_DECODER */
/***************************************************************************/
| 1 | 15,098 | No, we can't skip any of the mangling after the suspend point: this should follow the other mangling and undo its push. | DynamoRIO-dynamorio | c |
@@ -4,6 +4,7 @@
import flatbuffers
from flatbuffers.compat import import_numpy
+from tests.namespace_test.NamespaceC.TableInC import TableInC, TableInCT
np = import_numpy()
class SecondTableInA(object): | 1 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: NamespaceA
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SecondTableInA(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsSecondTableInA(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SecondTableInA()
x.Init(buf, n + offset)
return x
# SecondTableInA
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SecondTableInA
def ReferToC(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
obj = TableInC()
obj.Init(self._tab.Bytes, x)
return obj
return None
def SecondTableInAStart(builder): builder.StartObject(1)
def SecondTableInAAddReferToC(builder, referToC): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(referToC), 0)
def SecondTableInAEnd(builder): return builder.EndObject()
try:
from typing import Optional
except:
pass
class SecondTableInAT(object):
# SecondTableInAT
def __init__(self):
self.referToC = None # type: Optional[TableInCT]
@classmethod
def InitFromBuf(cls, buf, pos):
secondTableInA = SecondTableInA()
secondTableInA.Init(buf, pos)
return cls.InitFromObj(secondTableInA)
@classmethod
def InitFromObj(cls, secondTableInA):
x = SecondTableInAT()
x._UnPack(secondTableInA)
return x
# SecondTableInAT
def _UnPack(self, secondTableInA):
if secondTableInA is None:
return
if secondTableInA.ReferToC() is not None:
self.referToC = TableInCT.InitFromObj(secondTableInA.ReferToC())
# SecondTableInAT
def Pack(self, builder):
if self.referToC is not None:
referToC = self.referToC.Pack(builder)
SecondTableInAStart(builder)
if self.referToC is not None:
SecondTableInAAddReferToC(builder, referToC)
secondTableInA = SecondTableInAEnd(builder)
return secondTableInA
| 1 | 18,700 | this is generated code.. these changes will need to be made in the code generator to have them stick. | google-flatbuffers | java |
@@ -1,8 +1,9 @@
+/* global xit */
describe('hidden content', function () {
'use strict';
- var fixture = document.getElementById('fixture');
-
+ var fixture = document.getElementById('fixture');
+ var shadowSupport = document.body && typeof document.body.attachShadow === 'function';
var checkContext = {
_data: null,
data: function (d) { | 1 | describe('hidden content', function () {
'use strict';
var fixture = document.getElementById('fixture');
var checkContext = {
_data: null,
data: function (d) {
this._data = d;
}
};
afterEach(function () {
fixture.innerHTML = '';
checkContext._data = null;
});
it('should return undefined with display:none and children', function () {
fixture.innerHTML = '<div id="target" style="display: none;"><p>Some paragraph text.</p></div>';
var node = fixture.querySelector('#target');
assert.isUndefined(checks['hidden-content'].evaluate.call(checkContext, node));
});
it('should return undefined with visibility:hidden and children', function () {
fixture.innerHTML = '<div id="target" style="visibility: hidden;"><p>Some paragraph text.</p></div>';
var node = fixture.querySelector('#target');
assert.isUndefined(checks['hidden-content'].evaluate.call(checkContext, node));
});
it('should return true with visibility:hidden and parent with visibility:hidden', function () {
fixture.innerHTML = '<div style="visibility: hidden;"><p id="target" style="visibility: hidden;">Some paragraph text.</p></div>';
var node = fixture.querySelector('#target');
assert.isTrue(checks['hidden-content'].evaluate.call(checkContext, node));
});
it('should return true with aria-hidden and no content', function () {
fixture.innerHTML = '<span id="target" class="icon" aria-hidden="true"></span>';
var node = fixture.querySelector('#target');
assert.isTrue(checks['hidden-content'].evaluate.call(checkContext, node));
});
it('should skip whitelisted elements', function () {
var node = document.querySelector('head');
assert.isTrue(checks['hidden-content'].evaluate.call(checkContext, node));
});
});
| 1 | 11,245 | Should we abstract this into a reusable utility so it doesn't have to get repeated in every test file needing Shadow DOM support? | dequelabs-axe-core | js |
@@ -9,6 +9,7 @@ const ignoreNsNotFound = shared.ignoreNsNotFound;
const loadSpecTests = require('../spec').loadSpecTests;
const chai = require('chai');
const expect = chai.expect;
+const runUnifiedTest = require('./unified-spec-runner/runner').runUnifiedTest;
describe('APM', function() {
before(function() { | 1 | 'use strict';
const instrument = require('../..').instrument;
const shared = require('./shared');
const setupDatabase = shared.setupDatabase;
const filterForCommands = shared.filterForCommands;
const filterOutCommands = shared.filterOutCommands;
const ignoreNsNotFound = shared.ignoreNsNotFound;
const loadSpecTests = require('../spec').loadSpecTests;
const chai = require('chai');
const expect = chai.expect;
describe('APM', function() {
before(function() {
return setupDatabase(this.configuration);
});
it(
'should support legacy `instrument`/`uninstrument` methods with MongoClient.prototype.connect',
{
metadata: { requires: { topology: ['single', 'replicaset', 'sharded'] } },
// The actual test we wish to run
test: function() {
let started = [];
let succeeded = [];
const instrumentation = instrument();
instrumentation.on('started', filterForCommands('insert', started));
instrumentation.on('succeeded', filterForCommands('insert', succeeded));
let client = this.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
return client
.connect()
.then(client =>
client
.db(this.configuration.db)
.collection('apm_test')
.insertOne({ a: 1 })
)
.then(r => {
expect(r.insertedCount).to.equal(1);
expect(started.length).to.equal(1);
expect(started[0].commandName).to.equal('insert');
expect(started[0].command.insert).to.equal('apm_test');
expect(succeeded.length).to.equal(1);
instrumentation.uninstrument();
return client.close();
})
.then(() => {
started = [];
succeeded = [];
client = this.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
return client.connect();
})
.then(() =>
client
.db(this.configuration.db)
.collection('apm_test')
.insertOne({ a: 1 })
)
.then(r => {
expect(r.insertedCount).to.equal(1);
expect(started.length).to.equal(0);
expect(succeeded.length).to.equal(0);
return client.close();
});
}
}
);
it('should support legacy `instrument`/`uninstrument` methods with MongoClient `connect`', {
metadata: { requires: { topology: ['single', 'replicaset', 'sharded'] } },
// The actual test we wish to run
test: function() {
let started = [];
let succeeded = [];
const instrumentation = instrument();
instrumentation.on('started', filterForCommands('insert', started));
instrumentation.on('succeeded', filterForCommands('insert', succeeded));
const firstClient = this.configuration.newClient({}, { monitorCommands: true });
const secondClient = this.configuration.newClient({}, { monitorCommands: true });
return firstClient.connect().then(client => {
return client
.db(this.configuration.db)
.collection('apm_test')
.insertOne({ a: 1 })
.then(r => {
expect(r.insertedCount).to.equal(1);
expect(started.length).to.equal(1);
expect(started[0].commandName).to.equal('insert');
expect(started[0].command.insert).to.equal('apm_test');
expect(succeeded.length).to.equal(1);
instrumentation.uninstrument();
return client.close();
})
.then(() => {
started = [];
succeeded = [];
return secondClient.connect();
})
.then(newClient => {
return newClient
.db(this.configuration.db)
.collection('apm_test')
.insertOne({ a: 1 })
.then(r => {
expect(r.insertedCount).to.equal(1);
expect(started.length).to.equal(0);
expect(succeeded.length).to.equal(0);
return newClient.close();
});
});
});
}
});
it('should correctly receive the APM events for an insert', {
metadata: { requires: { topology: ['single', 'replicaset', 'sharded'] } },
// The actual test we wish to run
test: function() {
const started = [];
const succeeded = [];
const client = this.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
client.on('commandStarted', filterForCommands('insert', started));
client.on('commandSucceeded', filterForCommands('insert', succeeded));
return client
.connect()
.then(client =>
client
.db(this.configuration.db)
.collection('apm_test')
.insertOne({ a: 1 })
)
.then(r => {
expect(r.insertedCount).to.equal(1);
expect(started.length).to.equal(1);
expect(started[0].commandName).to.equal('insert');
expect(started[0].command.insert).to.equal('apm_test');
expect(succeeded.length).to.equal(1);
return client.close();
});
}
});
it('should correctly handle cursor.close when no cursor existed', {
metadata: { requires: { topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function() {
const started = [];
const succeeded = [];
const self = this;
const client = this.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
client.on('commandStarted', filterForCommands('insert', started));
client.on('commandSucceeded', filterForCommands('insert', succeeded));
return client.connect().then(client => {
const db = client.db(self.configuration.db);
const collection = db.collection('apm_test_cursor');
return collection.insertMany([{ a: 1 }, { a: 2 }, { a: 3 }]).then(r => {
expect(r.insertedCount).to.equal(3);
const cursor = collection.find({});
return cursor.count().then(() => {
cursor.close(); // <-- Will cause error in APM module.
return client.close();
});
});
});
}
});
it('should correctly receive the APM events for a listCollections command', {
metadata: { requires: { topology: ['replicaset'], mongodb: '>=3.0.0' } },
// The actual test we wish to run
test: function() {
const self = this;
const ReadPreference = self.configuration.require.ReadPreference;
const started = [];
const succeeded = [];
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
client.on('commandStarted', filterForCommands('listCollections', started));
client.on('commandSucceeded', filterForCommands('listCollections', succeeded));
return client.connect().then(client => {
const db = client.db(self.configuration.db);
return db
.collection('apm_test_list_collections')
.insertOne({ a: 1 }, self.configuration.writeConcernMax())
.then(r => {
expect(r.insertedCount).to.equal(1);
return db.listCollections({}, { readPreference: ReadPreference.PRIMARY }).toArray();
})
.then(() =>
db.listCollections({}, { readPreference: ReadPreference.SECONDARY }).toArray()
)
.then(() => {
expect(started).to.have.lengthOf(2);
if (self.configuration.usingUnifiedTopology()) {
expect(started[0])
.property('address')
.to.not.equal(started[1].address);
} else {
// Ensure command was not sent to the primary
expect(started[0].connectionId).to.not.equal(started[1].connectionId);
}
return client.close();
});
});
}
});
it('should correctly receive the APM events for a listIndexes command', {
metadata: { requires: { topology: ['replicaset'], mongodb: '>=3.0.0' } },
// The actual test we wish to run
test: function() {
const self = this;
const ReadPreference = self.configuration.require.ReadPreference;
const started = [];
const succeeded = [];
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
const desiredEvents = ['listIndexes', 'find'];
client.on('commandStarted', filterForCommands(desiredEvents, started));
client.on('commandSucceeded', filterForCommands(desiredEvents, succeeded));
return client.connect().then(() => {
const db = client.db(self.configuration.db);
return db
.collection('apm_test_list_collections')
.insertOne({ a: 1 }, self.configuration.writeConcernMax())
.then(r => {
expect(r.insertedCount).to.equal(1);
return db
.collection('apm_test_list_collections')
.listIndexes({ readPreference: ReadPreference.PRIMARY })
.toArray();
})
.then(() =>
db
.collection('apm_test_list_collections')
.listIndexes({ readPreference: ReadPreference.SECONDARY })
.toArray()
)
.then(() => {
expect(started).to.have.lengthOf(2);
// Ensure command was not sent to the primary
if (self.configuration.usingUnifiedTopology()) {
expect(started[0])
.property('address')
.to.not.equal(started[1].address);
} else {
expect(started[0].connectionId).to.not.equal(started[1].connectionId);
}
return client.close();
});
});
}
});
it.skip(
'should correctly receive the APM events for an insert using custom operationId and time generator',
{
metadata: { requires: { topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function() {
const self = this;
const started = [];
const succeeded = [];
const callbackTriggered = false;
// testListener = require('../..').instrument(
// {
// operationIdGenerator: {
// next: function() {
// return 10000;
// }
// },
// timestampGenerator: {
// current: function() {
// return 1;
// },
// duration: function(start, end) {
// return end - start;
// }
// }
// },
// function(err) {
// expect(err).to.be.null;
// callbackTriggered = true;
// }
// );
// testListener.on('started', function(event) {
// if (event.commandName === 'insert') started.push(event);
// });
// testListener.on('succeeded', function(event) {
// if (event.commandName === 'insert') succeeded.push(event);
// });
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
return client.connect().then(client => {
const db = client.db(self.configuration.db);
return db
.collection('apm_test_1')
.insertOne({ a: 1 })
.then(() => {
expect(started).to.have.length(1);
expect(succeeded).to.have.length(1);
expect(started[0].commandName).to.equal('insert');
expect(started[0].command.insert).to.equal('apm_test_1');
expect(started[0].operationId).to.equal(10000);
expect(succeeded[0].duration).to.equal(0);
expect(callbackTriggered).to.be.true;
// testListener.uninstrument();
return client.close();
});
});
}
}
);
it('should correctly receive the APM events for a find with getmore and killcursor', {
metadata: { requires: { topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function() {
const self = this;
const ReadPreference = self.configuration.require.ReadPreference;
const started = [];
const succeeded = [];
const failed = [];
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
const desiredEvents = ['find', 'getMore', 'killCursors'];
client.on('commandStarted', filterForCommands(desiredEvents, started));
client.on('commandSucceeded', filterForCommands(desiredEvents, succeeded));
client.on('commandFailed', filterForCommands(desiredEvents, failed));
return client.connect().then(client => {
const db = client.db(self.configuration.db);
// Drop the collection
return db
.collection('apm_test_2')
.drop()
.catch(ignoreNsNotFound)
.then(() => {
// Insert test documents
return db
.collection('apm_test_2')
.insertMany([{ a: 1 }, { a: 1 }, { a: 1 }, { a: 1 }, { a: 1 }, { a: 1 }], { w: 1 });
})
.then(r => {
expect(r.insertedCount).to.equal(6);
return db
.collection('apm_test_2')
.find({ a: 1 })
.project({ _id: 1, a: 1 })
.hint({ _id: 1 })
.skip(1)
.limit(100)
.batchSize(2)
.comment('some comment')
.maxTimeMS(5000)
.setReadPreference(ReadPreference.PRIMARY)
.addCursorFlag('noCursorTimeout', true)
.toArray();
})
.then(docs => {
// Assert basic documents
expect(docs).to.have.length(5);
expect(started).to.have.length(3);
expect(succeeded).to.have.length(3);
expect(failed).to.have.length(0);
// Success messages
expect(succeeded[0].reply).to.not.be.null;
expect(succeeded[0].operationId).to.equal(succeeded[1].operationId);
expect(succeeded[0].operationId).to.equal(succeeded[2].operationId);
expect(succeeded[1].reply).to.not.be.null;
expect(succeeded[2].reply).to.not.be.null;
// Started
expect(started[0].operationId).to.equal(started[1].operationId);
expect(started[0].operationId).to.equal(started[2].operationId);
return client.close();
});
});
}
});
it('should correctly receive the APM failure event for find', {
metadata: { requires: { topology: ['single', 'replicaset'], mongodb: '>=2.6.0' } },
// The actual test we wish to run
test: function() {
const self = this;
const ReadPreference = self.configuration.require.ReadPreference;
const started = [];
const succeeded = [];
const failed = [];
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
const desiredEvents = ['find', 'getMore', 'killCursors'];
client.on('commandStarted', filterForCommands(desiredEvents, started));
client.on('commandSucceeded', filterForCommands(desiredEvents, succeeded));
client.on('commandFailed', filterForCommands(desiredEvents, failed));
return client.connect().then(client => {
const db = client.db(self.configuration.db);
// Drop the collection
return db
.collection('apm_test_2')
.drop()
.catch(ignoreNsNotFound)
.then(() => {
// Insert test documents
return db
.collection('apm_test_2')
.insertMany([{ a: 1 }, { a: 1 }, { a: 1 }, { a: 1 }, { a: 1 }, { a: 1 }]);
})
.then(r => {
expect(r.insertedCount).to.equal(6);
return db
.collection('apm_test_2')
.find({ $illegalfield: 1 })
.project({ _id: 1, a: 1 })
.hint({ _id: 1 })
.skip(1)
.limit(100)
.batchSize(2)
.comment('some comment')
.maxTimeMS(5000)
.setReadPreference(ReadPreference.PRIMARY)
.addCursorFlag('noCursorTimeout', true)
.toArray();
})
.then(() => {
throw new Error('this should not happen');
})
.catch(() => {
expect(failed).to.have.length(1);
return client.close();
});
});
}
});
it('should correctly receive the APM events for a bulk operation', {
metadata: { requires: { topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function() {
const self = this;
const started = [];
const succeeded = [];
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
const desiredEvents = ['insert', 'update', 'delete'];
client.on('commandStarted', filterForCommands(desiredEvents, started));
client.on('commandSucceeded', filterForCommands(desiredEvents, succeeded));
return client.connect().then(client => {
const db = client.db(self.configuration.db);
return db
.collection('apm_test_3')
.bulkWrite(
[
{ insertOne: { a: 1 } },
{ updateOne: { q: { a: 2 }, u: { $set: { a: 2 } }, upsert: true } },
{ deleteOne: { q: { c: 1 } } }
],
{ ordered: true }
)
.then(() => {
expect(started).to.have.length(3);
expect(succeeded).to.have.length(3);
expect(started[0].operationId).to.equal(started[1].operationId);
expect(started[0].operationId).to.equal(started[2].operationId);
expect(succeeded[0].operationId).to.equal(succeeded[1].operationId);
expect(succeeded[0].operationId).to.equal(succeeded[2].operationId);
return client.close();
});
});
}
});
it('should correctly receive the APM explain command', {
metadata: { requires: { topology: ['single', 'replicaset'] } },
test: function() {
const self = this;
const started = [];
const succeeded = [];
const failed = [];
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
const desiredEvents = ['find', 'getMore', 'killCursors', 'explain'];
client.on('commandStarted', filterForCommands(desiredEvents, started));
client.on('commandSucceeded', filterForCommands(desiredEvents, succeeded));
client.on('commandFailed', filterForCommands(desiredEvents, failed));
return client.connect().then(client => {
const db = client.db(self.configuration.db);
return db
.collection('apm_test_2')
.drop()
.catch(ignoreNsNotFound)
.then(() =>
db
.collection('apm_test_2')
.insertMany([{ a: 1 }, { a: 1 }, { a: 1 }, { a: 1 }, { a: 1 }, { a: 1 }], { w: 1 })
)
.then(r => {
expect(r.insertedCount).to.equal(6);
return db
.collection('apm_test_2')
.find({ a: 1 })
.explain();
})
.then(explain => {
expect(explain).to.not.be.null;
expect(started).to.have.length(1);
expect(started[0].commandName).to.equal('explain');
expect(started[0].command.explain.find).to.equal('apm_test_2');
expect(succeeded).to.have.length(1);
expect(succeeded[0].commandName).to.equal('explain');
expect(started[0].operationId).to.equal(succeeded[0].operationId);
return client.close();
});
});
}
});
it('should correctly filter out sensitive commands', {
metadata: { requires: { topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function() {
const self = this;
const started = [];
const succeeded = [];
const failed = [];
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
const desiredEvents = ['getnonce'];
client.on('commandStarted', filterForCommands(desiredEvents, started));
client.on('commandSucceeded', filterForCommands(desiredEvents, succeeded));
client.on('commandFailed', filterForCommands(desiredEvents, failed));
return client
.connect()
.then(client => client.db(self.configuration.db).command({ getnonce: true }))
.then(r => {
expect(r).to.exist;
expect(started).to.have.length(1);
expect(succeeded).to.have.length(1);
expect(failed).to.have.length(0);
expect(started[0].commandObj).to.eql({ getnonce: true });
expect(succeeded[0].reply).to.eql({});
return client.close();
});
}
});
it('should correctly receive the APM events for an updateOne', {
metadata: { requires: { topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function() {
const self = this;
const started = [];
const succeeded = [];
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
const desiredEvents = ['update'];
client.on('commandStarted', filterForCommands(desiredEvents, started));
client.on('commandSucceeded', filterForCommands(desiredEvents, succeeded));
return client
.connect()
.then(client =>
client
.db(self.configuration.db)
.collection('apm_test_u_1')
.updateOne({ a: 1 }, { $set: { b: 1 } }, { upsert: true })
)
.then(r => {
expect(r).to.exist;
expect(started).to.have.length(1);
expect(started[0].commandName).to.equal('update');
expect(started[0].command.update).to.equal('apm_test_u_1');
expect(succeeded).to.have.length(1);
return client.close();
});
}
});
it('should correctly receive the APM events for an updateMany', {
metadata: { requires: { topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function() {
const self = this;
const started = [];
const succeeded = [];
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
const desiredEvents = ['update'];
client.on('commandStarted', filterForCommands(desiredEvents, started));
client.on('commandSucceeded', filterForCommands(desiredEvents, succeeded));
return client
.connect()
.then(client =>
client
.db(self.configuration.db)
.collection('apm_test_u_2')
.updateMany({ a: 1 }, { $set: { b: 1 } }, { upsert: true })
)
.then(r => {
expect(r).to.exist;
expect(started).to.have.length(1);
expect(started[0].commandName).to.equal('update');
expect(started[0].command.update).to.equal('apm_test_u_2');
expect(succeeded).to.have.length(1);
return client.close();
});
}
});
it('should correctly receive the APM events for deleteOne', {
metadata: { requires: { topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function() {
const self = this;
const started = [];
const succeeded = [];
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
const desiredEvents = ['delete'];
client.on('commandStarted', filterForCommands(desiredEvents, started));
client.on('commandSucceeded', filterForCommands(desiredEvents, succeeded));
return client
.connect()
.then(client =>
client
.db(self.configuration.db)
.collection('apm_test_u_3')
.deleteOne({ a: 1 })
)
.then(r => {
expect(r).to.exist;
expect(started).to.have.length(1);
expect(started[0].commandName).to.equal('delete');
expect(started[0].command.delete).to.equal('apm_test_u_3');
expect(succeeded).to.have.length(1);
return client.close();
});
}
});
it('should ensure killcursor commands are sent on 3.0 or earlier when APM is enabled', {
metadata: { requires: { topology: ['single', 'replicaset'], mongodb: '<=3.0.x' } },
// The actual test we wish to run
test: function() {
const self = this;
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
return client.connect().then(client => {
const db = client.db(self.configuration.db);
const admindb = db.admin();
let cursorCountBefore;
let cursorCountAfter;
const collection = db.collection('apm_killcursor_tests');
// make sure collection has records (more than 2)
return collection
.insertMany([{ a: 1 }, { a: 2 }, { a: 3 }])
.then(r => {
expect(r).to.exist;
return admindb.serverStatus();
})
.then(result => {
cursorCountBefore = result.cursors.clientCursors_size;
let cursor = collection.find({}).limit(2);
return cursor.toArray().then(r => {
expect(r).to.exist;
return cursor.close();
});
})
.then(() => admindb.serverStatus())
.then(result => {
cursorCountAfter = result.cursors.clientCursors_size;
expect(cursorCountBefore).to.equal(cursorCountAfter);
return client.close();
});
});
}
});
it('should correctly decorate the apm result for aggregation with cursorId', {
metadata: { requires: { topology: ['single', 'replicaset'], mongodb: '>=3.0.0' } },
// The actual test we wish to run
test: function() {
const self = this;
const started = [];
const succeeded = [];
// Generate docs
const docs = [];
for (let i = 0; i < 2500; i++) docs.push({ a: i });
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
const desiredEvents = ['aggregate', 'getMore'];
client.on('commandStarted', filterForCommands(desiredEvents, started));
client.on('commandSucceeded', filterForCommands(desiredEvents, succeeded));
return client.connect().then(() => {
const db = client.db(self.configuration.db);
return db
.collection('apm_test_u_4')
.drop()
.catch(ignoreNsNotFound)
.then(() => db.collection('apm_test_u_4').insertMany(docs))
.then(r => {
expect(r).to.exist;
return db
.collection('apm_test_u_4')
.aggregate([{ $match: {} }])
.toArray();
})
.then(r => {
expect(r).to.exist;
expect(started).to.have.length(4);
expect(succeeded).to.have.length(4);
const cursors = succeeded.map(x => x.reply.cursor);
// Check we have a cursor
expect(cursors[0].id).to.exist;
expect(cursors[0].id.toString()).to.equal(cursors[1].id.toString());
expect(cursors[3].id.toString()).to.equal('0');
return client.close();
});
});
}
});
it('should correctly decorate the apm result for listCollections with cursorId', {
metadata: { requires: { topology: ['single', 'replicaset'], mongodb: '>=3.0.0' } },
test: function() {
const self = this;
const started = [];
const succeeded = [];
const client = self.configuration.newClient(
{ w: 1 },
{ poolSize: 1, auto_reconnect: false, monitorCommands: true }
);
const desiredEvents = ['listCollections'];
client.on('commandStarted', filterForCommands(desiredEvents, started));
client.on('commandSucceeded', filterForCommands(desiredEvents, succeeded));
return client.connect().then(client => {
const db = client.db(self.configuration.db);
const promises = [];
for (let i = 0; i < 20; i++) {
promises.push(db.collection('_mass_collection_' + i).insertOne({ a: 1 }));
}
return Promise.all(promises)
.then(r => {
expect(r).to.exist;
return db
.listCollections()
.batchSize(10)
.toArray();
})
.then(r => {
expect(r).to.exist;
expect(started).to.have.length(1);
expect(succeeded).to.have.length(1);
const cursors = succeeded.map(x => x.reply.cursor);
expect(cursors[0].id).to.exist;
return client.close();
});
});
}
});
describe('spec tests', function() {
before(function() {
return setupDatabase(this.configuration);
});
// TODO: The worst part about this custom validation method is that it does not
// provide the rich context of failure location that chai gives us out of
    // the box. I investigated extending chai; however, its internal implementation
    // does not reuse other internal methods, so we would have to bring lodash in.
// It may be worth seeing if we can improve on this, as we might need the
// behavior in other future YAML tests.
const maybeLong = val => (typeof val.equals === 'function' ? val.toNumber() : val);
function apmExpect(actual, expected, parentKey) {
Object.keys(expected).forEach(key => {
expect(actual).to.include.key(key);
if (Array.isArray(expected[key])) {
expect(actual[key]).to.be.instanceOf(Array);
expect(actual[key]).to.have.lengthOf(expected[key].length);
for (let i = 0; i < expected[key].length; ++i) {
apmExpect(actual[key][i], expected[key][i], key);
}
return;
}
if (expected[key] === 42 || expected[key] === '42' || expected[key] === '') {
if (key === 'code' && expected[key] === 42) {
expect(actual[key]).to.be.greaterThan(0);
}
if (key === 'errmsg' && expected[key] === '') {
expect(actual[key]).to.have.lengthOf.at.least(1); // >= 1
}
if (key === 'getmore' || (parentKey === 'cursor' && key === 'id')) {
expect(maybeLong(actual[key])).to.be.greaterThan(0);
}
return;
}
// cheap isPlainObject clone
if (Object.prototype.toString.call(expected[key]) === '[object Object]') {
apmExpect(actual[key], expected[key], key);
return;
}
// otherwise compare the values
expect(maybeLong(actual[key])).to.deep.equal(expected[key]);
});
}
function validateCommandStartedEvent(expected, event) {
expect(event.commandName).to.equal(expected.command_name);
expect(event.databaseName).to.equal(expected.database_name);
apmExpect(event.command, expected.command);
}
function validateCommandSucceededEvent(expected, event) {
expect(event.commandName).to.equal(expected.command_name);
apmExpect(event.reply, expected.reply);
}
function validateCommandFailedEvent(expected, event) {
expect(event.commandName).to.equal(expected.command_name);
}
function validateExpectations(expectation, results) {
if (expectation.command_started_event) {
validateCommandStartedEvent(expectation.command_started_event, results.starts.shift());
} else if (expectation.command_succeeded_event) {
validateCommandSucceededEvent(
expectation.command_succeeded_event,
results.successes.shift()
);
} else if (expectation.command_failed_event) {
validateCommandFailedEvent(expectation.command_failed_event, results.failures.shift());
}
}
function executeOperation(client, scenario, test) {
// Get the operation
const operation = test.operation;
// Get the command name
const commandName = operation.name;
// Get the arguments
const args = operation.arguments || {};
// Get the database instance
const db = client.db(scenario.database_name);
// Get the collection
const collection = db.collection(scenario.collection_name);
// Parameters
const params = [];
// Options
let options = null;
// Get the data
const data = scenario.data;
// Command Monitoring context
const monitoringResults = {
successes: [],
failures: [],
starts: []
};
// Drop the collection
return collection
.drop()
.catch(err => {
// potentially skip this error
if (!err.message.match(/ns not found/)) throw err;
})
.then(() => collection.insertMany(data))
.then(r => {
expect(data).to.have.length(r.insertedCount);
// Set up the listeners
client.on(
'commandStarted',
filterOutCommands(['ismaster', 'endSessions'], monitoringResults.starts)
);
client.on(
'commandFailed',
filterOutCommands(['ismaster', 'endSessions'], monitoringResults.failures)
);
client.on(
'commandSucceeded',
filterOutCommands(['ismaster', 'endSessions'], monitoringResults.successes)
);
// Unpack the operation
if (args.filter) params.push(args.filter);
if (args.deletes) params.push(args.deletes);
if (args.document) params.push(args.document);
if (args.documents) params.push(args.documents);
if (args.update) params.push(args.update);
if (args.requests) params.push(args.requests);
if (args.writeConcern) {
if (options == null) {
options = args.writeConcern;
} else {
for (let name in args.writeConcern) {
options[name] = args.writeConcern[name];
}
}
}
if (typeof args.ordered === 'boolean') {
if (options == null) {
options = { ordered: args.ordered };
} else {
options.ordered = args.ordered;
}
}
if (typeof args.upsert === 'boolean') {
if (options == null) {
options = { upsert: args.upsert };
} else {
options.upsert = args.upsert;
}
}
        // The find command is special: it needs to be executed using toArray
if (operation.name === 'find') {
let cursor = collection[commandName]();
// Set the options
if (args.filter) cursor = cursor.filter(args.filter);
if (args.batchSize) cursor = cursor.batchSize(args.batchSize);
if (args.limit) cursor = cursor.limit(args.limit);
if (args.skip) cursor = cursor.skip(args.skip);
if (args.sort) cursor = cursor.sort(args.sort);
// Set any modifiers
if (args.modifiers) {
for (let modifier in args.modifiers) {
cursor.addQueryModifier(modifier, args.modifiers[modifier]);
}
}
// Execute find
return cursor
.toArray()
.catch(() => {} /* ignore */)
.then(() =>
test.expectations.forEach(expectation =>
validateExpectations(expectation, monitoringResults)
)
);
}
// Add options if they exists
if (options) params.push(options);
// Execute the operation
const promise = collection[commandName].apply(collection, params);
return promise
.catch(() => {} /* ignore */)
.then(() =>
test.expectations.forEach(expectation =>
validateExpectations(expectation, monitoringResults)
)
);
});
}
loadSpecTests('apm').forEach(scenario => {
describe(scenario.name, function() {
scenario.tests.forEach(test => {
const requirements = { topology: ['single', 'replicaset', 'sharded'] };
if (test.ignore_if_server_version_greater_than) {
requirements.mongodb = `<${test.ignore_if_server_version_greater_than}`;
} else if (test.ignore_if_server_version_less_than) {
requirements.mongodb = `>${test.ignore_if_server_version_less_than}`;
}
if (test.ignore_if_topology_type) {
requirements.topology = requirements.topology.filter(
top => test.ignore_if_topology_type.indexOf(top) < 0
);
}
it(test.description, {
metadata: { requires: requirements },
test: function() {
const client = this.configuration.newClient({}, { monitorCommands: true });
return client.connect().then(client => {
expect(client).to.exist;
return executeOperation(client, scenario, test).then(() => client.close());
});
}
});
});
});
});
});
});
| 1 | 20,020 | Maybe we rename this to `command_monitoring.test.js` to match the directory name change? Or would you rather do that as part of the greater test cleanup? I'm fine either way. | mongodb-node-mongodb-native | js |
@@ -577,7 +577,7 @@ SchemaString.prototype.$conditionalHandlers =
$gte: handleSingle,
$lt: handleSingle,
$lte: handleSingle,
- $options: handleSingle,
+ $options: String,
$regex: handleSingle,
$not: handleSingle
}); | 1 | 'use strict';
/*!
* Module dependencies.
*/
const SchemaType = require('../schematype');
const CastError = SchemaType.CastError;
const MongooseError = require('../error');
const castString = require('../cast/string');
const utils = require('../utils');
let Document;
/**
* String SchemaType constructor.
*
* @param {String} key
* @param {Object} options
* @inherits SchemaType
* @api public
*/
function SchemaString(key, options) {
this.enumValues = [];
this.regExp = null;
SchemaType.call(this, key, options, 'String');
}
/**
* This schema type's name, to defend against minifiers that mangle
* function names.
*
* @api public
*/
SchemaString.schemaName = 'String';
/*!
* Inherits from SchemaType.
*/
SchemaString.prototype = Object.create(SchemaType.prototype);
SchemaString.prototype.constructor = SchemaString;
/*!
* ignore
*/
SchemaString._cast = castString;
/**
* Get/set the function used to cast arbitrary values to strings.
*
* ####Example:
*
* // Throw an error if you pass in an object. Normally, Mongoose allows
* // objects with custom `toString()` functions.
* const original = mongoose.Schema.Types.String.cast();
* mongoose.Schema.Types.String.cast(v => {
* assert.ok(v == null || typeof v !== 'object');
* return original(v);
* });
*
* // Or disable casting entirely
* mongoose.Schema.Types.String.cast(false);
*
* @param {Function} caster
* @return {Function}
* @function get
* @static
* @api public
*/
SchemaString.cast = function cast(caster) {
if (arguments.length === 0) {
return this._cast;
}
if (caster === false) {
caster = v => {
if (v != null && typeof v !== 'string') {
throw new Error();
}
return v;
};
}
this._cast = caster;
return this._cast;
};
/**
* Attaches a getter for all String instances.
*
* ####Example:
*
 *     // Lowercase all strings
* mongoose.Schema.String.get(v => v.toLowerCase());
*
* const Model = mongoose.model('Test', new Schema({ test: String }));
* new Model({ test: 'FOO' }).test; // 'foo'
*
* @param {Function} getter
* @return {this}
* @function get
* @static
* @api public
*/
SchemaString.get = SchemaType.get;
/*!
* ignore
*/
SchemaString._checkRequired = v => (v instanceof String || typeof v === 'string') && v.length;
/**
* Override the function the required validator uses to check whether a string
* passes the `required` check.
*
* ####Example:
*
* // Allow empty strings to pass `required` check
* mongoose.Schema.Types.String.checkRequired(v => v != null);
*
 *     const M = mongoose.model('Test', new Schema({ str: { type: String, required: true } }));
* new M({ str: '' }).validateSync(); // `null`, validation passes!
*
* @param {Function} fn
* @return {Function}
* @function checkRequired
* @static
* @api public
*/
SchemaString.checkRequired = SchemaType.checkRequired;
/**
* Adds an enum validator
*
* ####Example:
*
* var states = ['opening', 'open', 'closing', 'closed']
* var s = new Schema({ state: { type: String, enum: states }})
* var M = db.model('M', s)
* var m = new M({ state: 'invalid' })
* m.save(function (err) {
* console.error(String(err)) // ValidationError: `invalid` is not a valid enum value for path `state`.
* m.state = 'open'
* m.save(callback) // success
* })
*
* // or with custom error messages
 *     var stateEnum = {
* values: ['opening', 'open', 'closing', 'closed'],
* message: 'enum validator failed for path `{PATH}` with value `{VALUE}`'
* }
 *     var s = new Schema({ state: { type: String, enum: stateEnum } })
* var M = db.model('M', s)
* var m = new M({ state: 'invalid' })
* m.save(function (err) {
* console.error(String(err)) // ValidationError: enum validator failed for path `state` with value `invalid`
* m.state = 'open'
* m.save(callback) // success
* })
*
* @param {String|Object} [args...] enumeration values
* @return {SchemaType} this
* @see Customized Error Messages #error_messages_MongooseError-messages
* @api public
*/
SchemaString.prototype.enum = function() {
if (this.enumValidator) {
this.validators = this.validators.filter(function(v) {
return v.validator !== this.enumValidator;
}, this);
this.enumValidator = false;
}
if (arguments[0] === void 0 || arguments[0] === false) {
return this;
}
let values;
let errorMessage;
if (utils.isObject(arguments[0])) {
values = arguments[0].values;
errorMessage = arguments[0].message;
} else {
values = arguments;
errorMessage = MongooseError.messages.String.enum;
}
for (let i = 0; i < values.length; i++) {
if (undefined !== values[i]) {
this.enumValues.push(this.cast(values[i]));
}
}
const vals = this.enumValues;
this.enumValidator = function(v) {
return undefined === v || ~vals.indexOf(v);
};
this.validators.push({
validator: this.enumValidator,
message: errorMessage,
type: 'enum',
enumValues: vals
});
return this;
};
/**
* Adds a lowercase [setter](http://mongoosejs.com/docs/api.html#schematype_SchemaType-set).
*
* ####Example:
*
* var s = new Schema({ email: { type: String, lowercase: true }})
* var M = db.model('M', s);
 *     var m = new M({ email: 'SomeEmail@example.COM' });
 *     console.log(m.email) // [email protected]
 *     M.find({ email: 'SomeEmail@example.COM' }); // Queries by '[email protected]'
*
* @api public
* @return {SchemaType} this
*/
SchemaString.prototype.lowercase = function(shouldApply) {
if (arguments.length > 0 && !shouldApply) {
return this;
}
return this.set(function(v, self) {
if (typeof v !== 'string') {
v = self.cast(v);
}
if (v) {
return v.toLowerCase();
}
return v;
});
};
/**
* Adds an uppercase [setter](http://mongoosejs.com/docs/api.html#schematype_SchemaType-set).
*
* ####Example:
*
* var s = new Schema({ caps: { type: String, uppercase: true }})
* var M = db.model('M', s);
* var m = new M({ caps: 'an example' });
* console.log(m.caps) // AN EXAMPLE
* M.find({ caps: 'an example' }) // Matches documents where caps = 'AN EXAMPLE'
*
* @api public
* @return {SchemaType} this
*/
SchemaString.prototype.uppercase = function(shouldApply) {
if (arguments.length > 0 && !shouldApply) {
return this;
}
return this.set(function(v, self) {
if (typeof v !== 'string') {
v = self.cast(v);
}
if (v) {
return v.toUpperCase();
}
return v;
});
};
/**
* Adds a trim [setter](http://mongoosejs.com/docs/api.html#schematype_SchemaType-set).
*
* The string value will be trimmed when set.
*
* ####Example:
*
* var s = new Schema({ name: { type: String, trim: true }})
* var M = db.model('M', s)
* var string = ' some name '
* console.log(string.length) // 11
* var m = new M({ name: string })
* console.log(m.name.length) // 9
*
* @api public
* @return {SchemaType} this
*/
SchemaString.prototype.trim = function(shouldTrim) {
if (arguments.length > 0 && !shouldTrim) {
return this;
}
return this.set(function(v, self) {
if (typeof v !== 'string') {
v = self.cast(v);
}
if (v) {
return v.trim();
}
return v;
});
};
/**
* Sets a minimum length validator.
*
* ####Example:
*
 *     var schema = new Schema({ postalCode: { type: String, minlength: 5 } })
* var Address = db.model('Address', schema)
* var address = new Address({ postalCode: '9512' })
* address.save(function (err) {
* console.error(err) // validator error
* address.postalCode = '95125';
* address.save() // success
* })
*
* // custom error messages
* // We can also use the special {MINLENGTH} token which will be replaced with the minimum allowed length
* var minlength = [5, 'The value of path `{PATH}` (`{VALUE}`) is shorter than the minimum allowed length ({MINLENGTH}).'];
 *     var schema = new Schema({ postalCode: { type: String, minlength: minlength } })
* var Address = mongoose.model('Address', schema);
* var address = new Address({ postalCode: '9512' });
* address.validate(function (err) {
* console.log(String(err)) // ValidationError: The value of path `postalCode` (`9512`) is shorter than the minimum length (5).
* })
*
* @param {Number} value minimum string length
* @param {String} [message] optional custom error message
* @return {SchemaType} this
* @see Customized Error Messages #error_messages_MongooseError-messages
* @api public
*/
SchemaString.prototype.minlength = function(value, message) {
if (this.minlengthValidator) {
this.validators = this.validators.filter(function(v) {
return v.validator !== this.minlengthValidator;
}, this);
}
if (value !== null && value !== undefined) {
let msg = message || MongooseError.messages.String.minlength;
msg = msg.replace(/{MINLENGTH}/, value);
this.validators.push({
validator: this.minlengthValidator = function(v) {
return v === null || v.length >= value;
},
message: msg,
type: 'minlength',
minlength: value
});
}
return this;
};
/**
* Sets a maximum length validator.
*
* ####Example:
*
* var schema = new Schema({ postalCode: { type: String, maxlength: 9 } })
* var Address = db.model('Address', schema)
* var address = new Address({ postalCode: '9512512345' })
* address.save(function (err) {
* console.error(err) // validator error
* address.postalCode = '95125';
* address.save() // success
* })
*
* // custom error messages
* // We can also use the special {MAXLENGTH} token which will be replaced with the maximum allowed length
* var maxlength = [9, 'The value of path `{PATH}` (`{VALUE}`) exceeds the maximum allowed length ({MAXLENGTH}).'];
* var schema = new Schema({ postalCode: { type: String, maxlength: maxlength } })
* var Address = mongoose.model('Address', schema);
* var address = new Address({ postalCode: '9512512345' });
* address.validate(function (err) {
* console.log(String(err)) // ValidationError: The value of path `postalCode` (`9512512345`) exceeds the maximum allowed length (9).
* })
*
* @param {Number} value maximum string length
* @param {String} [message] optional custom error message
* @return {SchemaType} this
* @see Customized Error Messages #error_messages_MongooseError-messages
* @api public
*/
SchemaString.prototype.maxlength = function(value, message) {
if (this.maxlengthValidator) {
this.validators = this.validators.filter(function(v) {
return v.validator !== this.maxlengthValidator;
}, this);
}
if (value !== null && value !== undefined) {
let msg = message || MongooseError.messages.String.maxlength;
msg = msg.replace(/{MAXLENGTH}/, value);
this.validators.push({
validator: this.maxlengthValidator = function(v) {
return v === null || v.length <= value;
},
message: msg,
type: 'maxlength',
maxlength: value
});
}
return this;
};
/**
* Sets a regexp validator.
*
* Any value that does not pass `regExp.test(val)` will fail validation.
*
* ####Example:
*
* var s = new Schema({ name: { type: String, match: /^a/ }})
* var M = db.model('M', s)
* var m = new M({ name: 'I am invalid' })
* m.validate(function (err) {
* console.error(String(err)) // "ValidationError: Path `name` is invalid (I am invalid)."
* m.name = 'apples'
* m.validate(function (err) {
* assert.ok(!err) // success
* })
* })
*
* // using a custom error message
* var match = [ /\.html$/, "That file doesn't end in .html ({VALUE})" ];
* var s = new Schema({ file: { type: String, match: match }})
* var M = db.model('M', s);
* var m = new M({ file: 'invalid' });
* m.validate(function (err) {
* console.log(String(err)) // "ValidationError: That file doesn't end in .html (invalid)"
* })
*
* Empty strings, `undefined`, and `null` values always pass the match validator. If you require these values, enable the `required` validator also.
*
* var s = new Schema({ name: { type: String, match: /^a/, required: true }})
*
* @param {RegExp} regExp regular expression to test against
* @param {String} [message] optional custom error message
* @return {SchemaType} this
* @see Customized Error Messages #error_messages_MongooseError-messages
* @api public
*/
SchemaString.prototype.match = function match(regExp, message) {
// yes, we allow multiple match validators
const msg = message || MongooseError.messages.String.match;
const matchValidator = function(v) {
if (!regExp) {
return false;
}
const ret = ((v != null && v !== '')
? regExp.test(v)
: true);
return ret;
};
this.validators.push({
validator: matchValidator,
message: msg,
type: 'regexp',
regexp: regExp
});
return this;
};
/**
* Check if the given value satisfies the `required` validator. The value is
* considered valid if it is a string (that is, not `null` or `undefined`) and
* has positive length. The `required` validator **will** fail for empty
* strings.
*
* @param {Any} value
* @param {Document} doc
* @return {Boolean}
* @api public
*/
SchemaString.prototype.checkRequired = function checkRequired(value, doc) {
if (SchemaType._isRef(this, value, doc, true)) {
return !!value;
}
// `require('util').inherits()` does **not** copy static properties, and
// plugins like mongoose-float use `inherits()` for pre-ES6.
const _checkRequired = typeof this.constructor.checkRequired == 'function' ?
this.constructor.checkRequired() :
SchemaString.checkRequired();
return _checkRequired(value);
};
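/*!
 * Editor's note: a minimal usage sketch, not part of the original file,
 * illustrating the `required` behaviour documented above. The schema and
 * model names are hypothetical and `mongoose`/`Schema` are assumed in scope.
 *
 *     var s = new Schema({ name: { type: String, required: true } });
 *     var M = mongoose.model('M', s);
 *     new M({ name: '' }).validate(function (err) {
 *       // err is a ValidationError: checkRequired() rejects empty strings
 *     });
 *     new M({ name: 'ok' }).validate(function (err) {
 *       // err is null: any non-empty string satisfies `required`
 *     });
 */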
/**
* Casts to String
*
* @api private
*/
SchemaString.prototype.cast = function(value, doc, init) {
if (SchemaType._isRef(this, value, doc, init)) {
// wait! we may need to cast this to a document
if (value === null || value === undefined) {
return value;
}
// lazy load
Document || (Document = require('./../document'));
if (value instanceof Document) {
value.$__.wasPopulated = true;
return value;
}
// setting a populated path
if (typeof value === 'string') {
return value;
} else if (Buffer.isBuffer(value) || !utils.isObject(value)) {
throw new CastError('string', value, this.path);
}
// Handle the case where user directly sets a populated
// path to a plain object; cast to the Model used in
// the population query.
const path = doc.$__fullPath(this.path);
const owner = doc.ownerDocument ? doc.ownerDocument() : doc;
const pop = owner.populated(path, true);
const ret = new pop.options.model(value);
ret.$__.wasPopulated = true;
return ret;
}
const castString = typeof this.constructor.cast === 'function' ?
this.constructor.cast() :
SchemaString.cast();
try {
return castString(value);
} catch (error) {
throw new CastError('string', value, this.path);
}
};
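/*!
 * Editor's note: hedged sketch, not part of the original file, of how cast()
 * behaves in the common non-populated case; the exact coercion rules live in
 * the default caster returned by SchemaString.cast().
 *
 *     var s = new Schema({ name: String });
 *     s.path('name').cast(123);    // '123' - values with a usable toString() are stringified
 *     s.path('name').cast('abc');  // 'abc' - strings pass through unchanged
 *     // values that cannot be stringified throw a CastError('string', value, path)
 */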
/*!
* ignore
*/
function handleSingle(val) {
return this.castForQuery(val);
}
function handleArray(val) {
const _this = this;
if (!Array.isArray(val)) {
return [this.castForQuery(val)];
}
return val.map(function(m) {
return _this.castForQuery(m);
});
}
SchemaString.prototype.$conditionalHandlers =
utils.options(SchemaType.prototype.$conditionalHandlers, {
$all: handleArray,
$gt: handleSingle,
$gte: handleSingle,
$lt: handleSingle,
$lte: handleSingle,
$options: handleSingle,
$regex: handleSingle,
$not: handleSingle
});
/**
* Casts contents for queries.
*
* @param {String} $conditional
* @param {any} [val]
* @api private
*/
SchemaString.prototype.castForQuery = function($conditional, val) {
let handler;
if (arguments.length === 2) {
handler = this.$conditionalHandlers[$conditional];
if (!handler) {
throw new Error('Can\'t use ' + $conditional + ' with String.');
}
return handler.call(this, val);
}
val = $conditional;
if (Object.prototype.toString.call(val) === '[object RegExp]') {
return val;
}
return this._castForQuery(val);
};
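/*!
 * Editor's note: illustrative sketch, not part of the original file, of the
 * query casting wired up through $conditionalHandlers above.
 *
 *     var s = new Schema({ name: String });
 *     s.path('name').castForQuery('$gt', 5);       // '5'  - handleSingle() casts the value
 *     s.path('name').castForQuery('$all', [1, 2]); // ['1', '2'] - handleArray() casts each element
 *     s.path('name').castForQuery(/^a/);           // /^a/ - RegExp values are returned as-is
 */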
/*!
* Module exports.
*/
module.exports = SchemaString;
| 1 | 14,011 | Why is this change necessary? `handleSingle()` will cast it to a string, no? | Automattic-mongoose | js |
@@ -228,4 +228,13 @@ public interface Table {
* @return a {@link LocationProvider} to provide locations for new data files
*/
LocationProvider locationProvider();
+
+ /**
+ * Return the name of this table.
+ *
+ * @return this table's name
+ */
+ default String name() {
+ return "table(" + hashCode() + ")";
+ }
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
/**
* Represents a table.
*/
public interface Table {
/**
* Refresh the current table metadata.
*/
void refresh();
/**
* Create a new {@link TableScan scan} for this table.
* <p>
* Once a table scan is created, it can be refined to project columns and filter data.
*
* @return a table scan for this table
*/
TableScan newScan();
/**
* Return the {@link Schema schema} for this table.
*
* @return this table's schema
*/
Schema schema();
/**
* Return the {@link PartitionSpec partition spec} for this table.
*
* @return this table's partition spec
*/
PartitionSpec spec();
/**
* Return a map of {@link PartitionSpec partition specs} for this table.
*
* @return this table's partition specs map
*/
Map<Integer, PartitionSpec> specs();
/**
* Return a map of string properties for this table.
*
* @return this table's properties map
*/
Map<String, String> properties();
/**
* Return the table's base location.
*
* @return this table's location
*/
String location();
/**
* Get the current {@link Snapshot snapshot} for this table, or null if there are no snapshots.
*
* @return the current table Snapshot.
*/
Snapshot currentSnapshot();
/**
* Get the {@link Snapshot snapshot} of this table with the given id, or null if there is no
* matching snapshot.
*
* @return the {@link Snapshot} with the given id.
*/
Snapshot snapshot(long snapshotId);
/**
* Get the {@link Snapshot snapshots} of this table.
*
* @return an Iterable of snapshots of this table.
*/
Iterable<Snapshot> snapshots();
/**
* Get the snapshot history of this table.
*
* @return a list of {@link HistoryEntry history entries}
*/
List<HistoryEntry> history();
/**
* Create a new {@link UpdateSchema} to alter the columns of this table and commit the change.
*
* @return a new {@link UpdateSchema}
*/
UpdateSchema updateSchema();
/**
* Create a new {@link UpdateProperties} to update table properties and commit the changes.
*
* @return a new {@link UpdateProperties}
*/
UpdateProperties updateProperties();
/**
* Create a new {@link UpdateLocation} to update table location and commit the changes.
*
* @return a new {@link UpdateLocation}
*/
UpdateLocation updateLocation();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
*
* @return a new {@link AppendFiles}
*/
AppendFiles newAppend();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
* <p>
* Using this method signals to the underlying implementation that the append should not perform
* extra work in order to commit quickly. Fast appends are not recommended for normal writes
* because the fast commit may cause split planning to slow down over time.
* <p>
* Implementations may not support fast appends, in which case this will return the same appender
* as {@link #newAppend()}.
*
* @return a new {@link AppendFiles}
*/
default AppendFiles newFastAppend() {
return newAppend();
}
/**
* Create a new {@link RewriteFiles rewrite API} to replace files in this table and commit.
*
* @return a new {@link RewriteFiles}
*/
RewriteFiles newRewrite();
/**
* Create a new {@link RewriteManifests rewrite manifests API} to replace manifests for this
* table and commit.
*
* @return a new {@link RewriteManifests}
*/
RewriteManifests rewriteManifests();
/**
* Create a new {@link OverwriteFiles overwrite API} to overwrite files by a filter expression.
*
* @return a new {@link OverwriteFiles}
*/
OverwriteFiles newOverwrite();
/**
* Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
* overwrite partitions in the table with new data.
* <p>
* This is provided to implement SQL compatible with Hive table operations but is not recommended.
* Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
*
* @return a new {@link ReplacePartitions}
*/
ReplacePartitions newReplacePartitions();
/**
* Create a new {@link DeleteFiles delete API} to replace files in this table and commit.
*
* @return a new {@link DeleteFiles}
*/
DeleteFiles newDelete();
/**
* Create a new {@link ExpireSnapshots expire API} to manage snapshots in this table and commit.
*
* @return a new {@link ExpireSnapshots}
*/
ExpireSnapshots expireSnapshots();
/**
* Create a new {@link Rollback rollback API} to roll back to a previous snapshot and commit.
*
* @return a new {@link Rollback}
*/
Rollback rollback();
/**
* Create a new {@link Transaction transaction API} to commit multiple table operations at once.
*
* @return a new {@link Transaction}
*/
Transaction newTransaction();
/**
* @return a {@link FileIO} to read and write table data and metadata files
*/
FileIO io();
/**
* @return an {@link org.apache.iceberg.encryption.EncryptionManager} to encrypt and decrypt
* data files.
*/
EncryptionManager encryption();
/**
* @return a {@link LocationProvider} to provide locations for new data files
*/
LocationProvider locationProvider();
}
| 1 | 16,752 | This newly added method should be well defined, should it return `TableIdentifier` or just `String`? | apache-iceberg | java |
@@ -18,7 +18,8 @@ use Ergonode\Core\Infrastructure\Model\RelationshipGroup;
class ProductMultimediaRelationshipStrategy implements RelationshipStrategyInterface
{
- private const MESSAGE = 'Object has active relationships with multimedia %relations%';
+ private const ONE_MESSAGE = 'Multimedia has a relation with product';
+ private const MULTIPLE_MESSAGE = 'Multimedia has %count% relations with some products';
private ProductQueryInterface $productQuery;
| 1 | <?php
/**
* Copyright © Ergonode Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\Product\Infrastructure\Strategy\Relationship;
use Ergonode\Core\Infrastructure\Strategy\RelationshipStrategyInterface;
use Ergonode\Product\Domain\Query\ProductQueryInterface;
use Ergonode\SharedKernel\Domain\Aggregate\MultimediaId;
use Ergonode\SharedKernel\Domain\Aggregate\ProductId;
use Ergonode\SharedKernel\Domain\AggregateId;
use Webmozart\Assert\Assert;
use Ergonode\Core\Infrastructure\Model\RelationshipGroup;
class ProductMultimediaRelationshipStrategy implements RelationshipStrategyInterface
{
private const MESSAGE = 'Object has active relationships with multimedia %relations%';
private ProductQueryInterface $productQuery;
public function __construct(ProductQueryInterface $productQuery)
{
$this->productQuery = $productQuery;
}
/**
* {@inheritDoc}
*/
public function supports(AggregateId $id): bool
{
return $id instanceof MultimediaId;
}
/**
* {@inheritDoc}
*/
public function getRelationshipGroup(AggregateId $id): RelationshipGroup
{
Assert::isInstanceOf($id, MultimediaId::class);
$result = [];
$list = $this->productQuery->getMultimediaRelation($id);
foreach (array_keys($list) as $productId) {
$result[] = new ProductId($productId);
}
return new RelationshipGroup(self::MESSAGE, $result);
}
}
| 1 | 9,596 | have a relation with a product | ergonode-backend | php |
@@ -129,8 +129,11 @@ var _ = Describe("health tests", func() {
AfterEach(func() {
for _, name := range podsToCleanUp {
- err := k8sInfra.K8sClient.CoreV1().Pods("default").Delete(name, &metav1.DeleteOptions{})
- Expect(err).NotTo(HaveOccurred())
+ // It is possible for a local pod to be deleted by GC if node is gone.
+ if k8sInfra.PodExist("default", name) {
+ err := k8sInfra.K8sClient.CoreV1().Pods("default").Delete(name, &metav1.DeleteOptions{})
+ Expect(err).NotTo(HaveOccurred())
+ }
}
podsToCleanUp = nil
}) | 1 | // +build fvtests
// Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fv_test
// The tests in this file test Felix's and Typha's health endpoints, http://.../liveness and
// http://.../readiness.
//
// Felix should report itself as live, so long as its calc_graph and int_dataplane loops have not
// died or hung; and as ready, so long as it has completed its initial dataplane programming, is
// connected to its datastore, and is not doing a resync (either the initial resync, or a subsequent
// one).
//
// Typha should report itself as live, so long as its Felix-serving loop has not died or hung; and
// as ready, so long as it is connected to its datastore, and is not doing a resync (either the
// initial resync, or a subsequent one).
//
// (These reports are useful because k8s can detect and handle a pod that is consistently non-live,
// by killing and restarting it; and can adjust for a pod that is non-ready, by (a) not routing
// Service traffic to it (when that pod is otherwise one of the possible backends for a Service),
// and (b) not moving on to the next pod, in a rolling upgrade process, until the just-upgraded pod
// says that it is ready.)
import (
"context"
"fmt"
"math/rand"
"net/http"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
log "github.com/sirupsen/logrus"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcalico/libcalico-go/lib/apis/v3"
"time"
"github.com/projectcalico/felix/fv/containers"
"github.com/projectcalico/felix/fv/infrastructure"
"github.com/projectcalico/felix/fv/utils"
"github.com/projectcalico/libcalico-go/lib/health"
"github.com/projectcalico/libcalico-go/lib/options"
)
var _ = Describe("health tests", func() {
var k8sInfra *infrastructure.K8sDatastoreInfra
BeforeEach(func() {
var err error
k8sInfra, err = infrastructure.GetK8sDatastoreInfra()
Expect(err).NotTo(HaveOccurred())
})
JustBeforeEach(func() {
// Felix can now flap ready/non-ready while loading its config. Delay until that
// is done.
time.Sleep(1 * time.Second)
})
AfterEach(func() {
k8sInfra.Stop()
})
var felixContainer *containers.Container
var felixReady, felixLiveness func() int
// describeCommonFelixTests creates specs for Felix tests that are common between the
// two scenarios below (with and without Typha).
describeCommonFelixTests := func() {
var podsToCleanUp []string
Describe("with normal Felix startup", func() {
It("should become ready and stay ready", func() {
Eventually(felixReady, "5s", "100ms").Should(BeGood())
Consistently(felixReady, "10s", "1s").Should(BeGood())
})
It("should become live and stay live", func() {
Eventually(felixLiveness, "5s", "100ms").Should(BeGood())
Consistently(felixLiveness, "10s", "1s").Should(BeGood())
})
})
createLocalPod := func() {
testPodName := fmt.Sprintf("test-pod-%x", rand.Uint32())
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: testPodName},
Spec: v1.PodSpec{Containers: []v1.Container{{
Name: fmt.Sprintf("container-foo"),
Image: "ignore",
}},
NodeName: felixContainer.Hostname,
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{{
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
}},
PodIP: "10.0.0.1",
},
}
var err error
pod, err = k8sInfra.K8sClient.CoreV1().Pods("default").Create(pod)
Expect(err).NotTo(HaveOccurred())
pod.Status.PodIP = "10.0.0.1"
_, err = k8sInfra.K8sClient.CoreV1().Pods("default").UpdateStatus(pod)
Expect(err).NotTo(HaveOccurred())
podsToCleanUp = append(podsToCleanUp, testPodName)
}
AfterEach(func() {
for _, name := range podsToCleanUp {
err := k8sInfra.K8sClient.CoreV1().Pods("default").Delete(name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
podsToCleanUp = nil
})
Describe("after removing iptables-restore", func() {
BeforeEach(func() {
// Wait until felix gets into steady state.
Eventually(felixReady, "5s", "100ms").Should(BeGood())
// Then remove iptables-restore.
err := felixContainer.ExecMayFail("rm", "/sbin/iptables-restore")
Expect(err).NotTo(HaveOccurred())
// Make an update that will force felix to run iptables-restore.
createLocalPod()
})
It("should become unready, then die", func() {
Eventually(felixReady, "120s", "10s").ShouldNot(BeGood())
Eventually(felixContainer.Stopped, "5s").Should(BeTrue())
})
})
Describe("after replacing iptables with a slow version", func() {
BeforeEach(func() {
// Wait until felix gets into steady state.
Eventually(felixReady, "5s", "100ms").Should(BeGood())
// Then replace iptables-restore with the bad version:
// We need to delete the file first since it's a symlink and "docker cp"
// follows the link and overwrites the wrong file if we don't.
err := felixContainer.ExecMayFail("rm", "/sbin/iptables-restore")
Expect(err).NotTo(HaveOccurred())
// Copy in the nobbled iptables command.
err = felixContainer.CopyFileIntoContainer("slow-iptables-restore",
"/sbin/iptables-restore")
Expect(err).NotTo(HaveOccurred())
// Make it executable.
err = felixContainer.ExecMayFail("chmod", "+x", "/sbin/iptables-restore")
Expect(err).NotTo(HaveOccurred())
// Make an update that will force felix to run iptables-restore.
createLocalPod()
})
It("should detect dataplane pause and become non-ready", func() {
Eventually(felixReady, "120s", "10s").ShouldNot(BeGood())
})
})
}
var typhaContainer *containers.Container
var typhaReady, typhaLiveness func() int
startTypha := func(getDockerArgs func() []string) {
typhaContainer = containers.Run("typha",
containers.RunOpts{AutoRemove: true},
append(getDockerArgs(),
"--privileged",
"-e", "TYPHA_HEALTHENABLED=true",
"-e", "TYPHA_HEALTHHOST=0.0.0.0",
"-e", "TYPHA_LOGSEVERITYSCREEN=info",
"-e", "TYPHA_DATASTORETYPE=kubernetes",
"-e", "TYPHA_PROMETHEUSMETRICSENABLED=true",
"-e", "TYPHA_USAGEREPORTINGENABLED=false",
"-e", "TYPHA_DEBUGMEMORYPROFILEPATH=\"heap-<timestamp>\"",
utils.Config.TyphaImage,
"calico-typha")...)
Expect(typhaContainer).NotTo(BeNil())
typhaReady = getHealthStatus(typhaContainer.IP, "9098", "readiness")
typhaLiveness = getHealthStatus(typhaContainer.IP, "9098", "liveness")
}
startFelix := func(typhaAddr string, getDockerArgs func() []string, calcGraphHangTime, dataplaneHangTime, healthHost string) {
felixContainer = containers.Run("felix",
containers.RunOpts{AutoRemove: true},
append(getDockerArgs(),
"--privileged",
"-e", "FELIX_IPV6SUPPORT=false",
"-e", "FELIX_HEALTHENABLED=true",
"-e", "FELIX_HEALTHHOST="+healthHost,
"-e", "FELIX_LOGSEVERITYSCREEN=info",
"-e", "FELIX_PROMETHEUSMETRICSENABLED=true",
"-e", "FELIX_USAGEREPORTINGENABLED=false",
"-e", "FELIX_DEBUGMEMORYPROFILEPATH=\"heap-<timestamp>\"",
"-e", "FELIX_DebugSimulateCalcGraphHangAfter="+calcGraphHangTime,
"-e", "FELIX_DebugSimulateDataplaneHangAfter="+dataplaneHangTime,
"-e", "FELIX_TYPHAADDR="+typhaAddr,
utils.Config.FelixImage)...)
Expect(felixContainer).NotTo(BeNil())
felixReady = getHealthStatus(felixContainer.IP, "9099", "readiness")
felixLiveness = getHealthStatus(felixContainer.IP, "9099", "liveness")
}
Describe("healthHost not 'all interfaces'", func() {
checkHealthInternally := func() error {
_, err := felixContainer.ExecOutput("wget", "-S", "-T", "2", "http://127.0.0.1:9099/readiness", "-O", "-")
return err
}
It("should run healthchecks on localhost by default", func() {
startFelix("", k8sInfra.GetDockerArgs, "", "", "")
Eventually(checkHealthInternally, "10s", "100ms").ShouldNot(HaveOccurred())
})
It("should run support running healthchecks on '127.0.0.1'", func() {
startFelix("", k8sInfra.GetDockerArgs, "", "", "127.0.0.1")
Eventually(checkHealthInternally, "10s", "100ms").ShouldNot(HaveOccurred())
})
It("should support running healthchecks on 'localhost'", func() {
startFelix("", k8sInfra.GetDockerArgs, "", "", "localhost")
Eventually(checkHealthInternally, "10s", "100ms").ShouldNot(HaveOccurred())
})
AfterEach(func() {
felixContainer.Stop()
})
})
Describe("with Felix running (no Typha)", func() {
BeforeEach(func() {
startFelix("", k8sInfra.GetDockerArgs, "", "", "0.0.0.0")
})
AfterEach(func() {
felixContainer.Stop()
})
describeCommonFelixTests()
})
Describe("with Felix (no Typha) and Felix calc graph set to hang", func() {
BeforeEach(func() {
startFelix("", k8sInfra.GetDockerArgs, "5", "", "0.0.0.0")
})
AfterEach(func() {
felixContainer.Stop()
})
It("should report live initially, then become non-live", func() {
Eventually(felixLiveness, "10s", "100ms").Should(BeGood())
Eventually(felixLiveness, "30s", "100ms").Should(BeBad())
Consistently(felixLiveness, "10s", "100ms").Should(BeBad())
})
})
Describe("with Felix (no Typha) and Felix dataplane set to hang", func() {
BeforeEach(func() {
startFelix("", k8sInfra.GetDockerArgs, "", "5", "0.0.0.0")
})
AfterEach(func() {
felixContainer.Stop()
})
It("should report live initially, then become non-live", func() {
Eventually(felixLiveness, "10s", "100ms").Should(BeGood())
Eventually(felixLiveness, "30s", "100ms").Should(BeBad())
Consistently(felixLiveness, "10s", "100ms").Should(BeBad())
})
})
Describe("with Felix and Typha running", func() {
BeforeEach(func() {
startTypha(k8sInfra.GetDockerArgs)
startFelix(typhaContainer.IP+":5473", k8sInfra.GetDockerArgs, "", "", "0.0.0.0")
})
AfterEach(func() {
felixContainer.Stop()
typhaContainer.Stop()
})
describeCommonFelixTests()
It("typha should report ready", func() {
Eventually(typhaReady, "5s", "100ms").Should(BeGood())
Consistently(typhaReady, "10s", "1s").Should(BeGood())
})
It("typha should report live", func() {
Eventually(typhaLiveness, "5s", "100ms").Should(BeGood())
Consistently(typhaLiveness, "10s", "1s").Should(BeGood())
})
})
Describe("with typha connected to bad API endpoint", func() {
BeforeEach(func() {
startTypha(k8sInfra.GetBadEndpointDockerArgs)
})
AfterEach(func() {
typhaContainer.Stop()
})
It("typha should not report ready", func() {
Consistently(typhaReady, "10s", "1s").ShouldNot(BeGood())
})
It("typha should not report live", func() {
Consistently(typhaLiveness, "10s", "1s").ShouldNot(BeGood())
})
})
Describe("with datastore not ready", func() {
var (
info *v3.ClusterInformation
)
BeforeEach(func() {
var err error
info, err = k8sInfra.GetCalicoClient().ClusterInformation().Get(
context.Background(),
"default",
options.GetOptions{},
)
Expect(err).NotTo(HaveOccurred())
log.Infof("info = %#v", info)
notReady := false
info.Spec.DatastoreReady = ¬Ready
info, err = k8sInfra.GetCalicoClient().ClusterInformation().Update(
context.Background(),
info,
options.SetOptions{},
)
Expect(err).NotTo(HaveOccurred())
startFelix("", k8sInfra.GetDockerArgs, "", "", "0.0.0.0")
})
AfterEach(func() {
if info != nil {
ready := true
info.Spec.DatastoreReady = &ready
var err error
info, err = k8sInfra.GetCalicoClient().ClusterInformation().Update(
context.Background(),
info,
options.SetOptions{},
)
Expect(err).NotTo(HaveOccurred())
}
})
AfterEach(func() {
felixContainer.Stop()
})
It("felix should report ready", func() {
Eventually(felixReady, "5s", "100ms").Should(BeGood())
Consistently(felixReady, "10s", "1s").Should(BeGood())
})
It("felix should report live", func() {
Eventually(felixLiveness, "5s", "100ms").Should(BeGood())
Consistently(felixLiveness, "10s", "1s").Should(BeGood())
})
})
})
const statusErr = -1
func getHealthStatus(ip, port, endpoint string) func() int {
return func() int {
resp, err := http.Get("http://" + ip + ":" + port + "/" + endpoint)
if err != nil {
log.WithError(err).WithField("resp", resp).Warn("HTTP GET failed")
return statusErr
}
defer resp.Body.Close()
log.WithField("resp", resp).Info("Health response")
return resp.StatusCode
}
}
func BeErr() types.GomegaMatcher {
return BeNumerically("==", statusErr)
}
func BeBad() types.GomegaMatcher {
return BeNumerically("==", health.StatusBad)
}
func BeGood() types.GomegaMatcher {
return BeNumerically("==", health.StatusGood)
}
| 1 | 16,586 | I guess that there is still a window here, because the GC could happen between the `PodExist` and `Delete` calls. Would it be better instead to check `err` and allow it if it says "pod has already been deleted"? | projectcalico-felix | c |
@@ -65,13 +65,8 @@ class DbTaskHistory(task_history.TaskHistory):
yield session
else:
session = self.session_factory()
- try:
+ with session.transaction:
yield session
- except:
- session.rollback()
- raise
- else:
- session.commit()
def __init__(self):
config = configuration.get_config() | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Provides a database backend to the central scheduler. This lets you see historical runs.
See :ref:`TaskHistory` for information about how to turn on the task history feature.
"""
#
# Description: Added code for visualization of how long each task takes
# (running time) until it reaches the next status (failed or done).
# At "{base_url}/tasklist", all completed (failed or done) tasks are shown.
# At "{base_url}/tasklist", a user can select one specific task to see
# how its running-time has changed over time.
# At "{base_url}/tasklist/{task_name}", it visualizes a multi-bar graph
# that represents the changes of the running-time for a selected task
# up to the next status (failed or done).
# This visualization let us know how the running-time of the specific task
# has changed over time.
#
# Copyright 2015 Naver Corp.
# Author Yeseul Park ([email protected])
#
import datetime
import logging
from contextlib import contextmanager
from luigi import six
from luigi import configuration
from luigi import task_history
from luigi.task_status import DONE, FAILED, PENDING, RUNNING
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
import sqlalchemy.orm.collections
Base = sqlalchemy.ext.declarative.declarative_base()
logger = logging.getLogger('luigi-interface')
class DbTaskHistory(task_history.TaskHistory):
"""
Task History that writes to a database using sqlalchemy.
Also has methods for useful db queries.
"""
@contextmanager
def _session(self, session=None):
if session:
yield session
else:
session = self.session_factory()
try:
yield session
except:
session.rollback()
raise
else:
session.commit()
def __init__(self):
config = configuration.get_config()
connection_string = config.get('task_history', 'db_connection')
self.engine = sqlalchemy.create_engine(connection_string)
self.session_factory = sqlalchemy.orm.sessionmaker(bind=self.engine, expire_on_commit=False)
Base.metadata.create_all(self.engine)
self.tasks = {} # task_id -> TaskRecord
def task_scheduled(self, task_id):
task = self._get_task(task_id, status=PENDING)
self._add_task_event(task, TaskEvent(event_name=PENDING, ts=datetime.datetime.now()))
def task_finished(self, task_id, successful):
event_name = DONE if successful else FAILED
task = self._get_task(task_id, status=event_name)
self._add_task_event(task, TaskEvent(event_name=event_name, ts=datetime.datetime.now()))
def task_started(self, task_id, worker_host):
task = self._get_task(task_id, status=RUNNING, host=worker_host)
self._add_task_event(task, TaskEvent(event_name=RUNNING, ts=datetime.datetime.now()))
def _get_task(self, task_id, status, host=None):
if task_id in self.tasks:
task = self.tasks[task_id]
task.status = status
if host:
task.host = host
else:
task = self.tasks[task_id] = task_history.Task(task_id, status, host)
return task
def _add_task_event(self, task, event):
for (task_record, session) in self._find_or_create_task(task):
task_record.events.append(event)
def _find_or_create_task(self, task):
with self._session() as session:
if task.record_id is not None:
logger.debug("Finding task with record_id [%d]", task.record_id)
task_record = session.query(TaskRecord).get(task.record_id)
if not task_record:
raise Exception("Task with record_id, but no matching Task record!")
yield (task_record, session)
else:
task_record = TaskRecord(name=task.task_family, host=task.host)
for (k, v) in six.iteritems(task.parameters):
task_record.parameters[k] = TaskParameter(name=k, value=v)
session.add(task_record)
yield (task_record, session)
if task.host:
task_record.host = task.host
task.record_id = task_record.id
def find_all_by_parameters(self, task_name, session=None, **task_params):
"""
Find tasks with the given task_name and the same parameters as the kwargs.
"""
with self._session(session) as session:
tasks = session.query(TaskRecord).join(TaskEvent).filter(TaskRecord.name == task_name).order_by(TaskEvent.ts).all()
for task in tasks:
if all(k in task.parameters and v == str(task.parameters[k].value) for (k, v) in six.iteritems(task_params)):
yield task
def find_all_by_name(self, task_name, session=None):
"""
Find all tasks with the given task_name.
"""
return self.find_all_by_parameters(task_name, session)
def find_latest_runs(self, session=None):
"""
Return tasks that have been updated in the past 24 hours.
"""
with self._session(session) as session:
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
return session.query(TaskRecord).\
join(TaskEvent).\
filter(TaskEvent.ts >= yesterday).\
group_by(TaskRecord.id, TaskEvent.event_name, TaskEvent.ts).\
order_by(TaskEvent.ts.desc()).\
all()
def find_all_runs(self, session=None):
"""
Return all tasks that have been updated.
"""
with self._session(session) as session:
return session.query(TaskRecord).all()
def find_all_events(self, session=None):
"""
Return all running/failed/done events.
"""
with self._session(session) as session:
return session.query(TaskEvent).all()
def find_task_by_id(self, id, session=None):
"""
Find task with the given record ID.
"""
with self._session(session) as session:
return session.query(TaskRecord).get(id)
class TaskParameter(Base):
"""
Table to track luigi.Parameter()s of a Task.
"""
__tablename__ = 'task_parameters'
task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'), primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String(128), primary_key=True)
value = sqlalchemy.Column(sqlalchemy.String(256))
def __repr__(self):
return "TaskParameter(task_id=%d, name=%s, value=%s)" % (self.task_id, self.name, self.value)
class TaskEvent(Base):
"""
Table to track when a task is scheduled, starts, finishes, and fails.
"""
__tablename__ = 'task_events'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'))
event_name = sqlalchemy.Column(sqlalchemy.String(20))
ts = sqlalchemy.Column(sqlalchemy.TIMESTAMP, index=True, nullable=False)
def __repr__(self):
return "TaskEvent(task_id=%s, event_name=%s, ts=%s" % (self.task_id, self.event_name, self.ts)
class TaskRecord(Base):
"""
Base table to track information about a luigi.Task.
References to other tables are available through task.events, task.parameters, etc.
"""
__tablename__ = 'tasks'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String(128), index=True)
host = sqlalchemy.Column(sqlalchemy.String(128))
parameters = sqlalchemy.orm.relationship(
'TaskParameter',
collection_class=sqlalchemy.orm.collections.attribute_mapped_collection('name'),
cascade="all, delete-orphan")
events = sqlalchemy.orm.relationship(
'TaskEvent',
order_by=(sqlalchemy.desc(TaskEvent.ts), sqlalchemy.desc(TaskEvent.id)),
backref='task')
def __repr__(self):
return "TaskRecord(name=%s, host=%s)" % (self.name, self.host)
| 1 | 12,624 | SQLAlchemy's session management does magic to make sure that if the rollback fails you still get the original exception that caused the rollback. Also it looks nicer. | spotify-luigi | py |
@@ -88,11 +88,11 @@ func (q *actQueue) Overlaps(act *iproto.ActionPb) bool {
switch {
case act.GetTransfer() != nil:
tsf := &action.Transfer{}
- tsf.ConvertFromTransferPb(act.GetTransfer())
+ tsf.ConvertFromActionPb(act)
nonce = tsf.Nonce
case act.GetVote() != nil:
vote := &action.Vote{}
- vote.ConvertFromVotePb(act.GetVote())
+ vote.ConvertFromActionPb(act)
nonce = vote.Nonce
}
return q.items[nonce] != nil | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package actpool
import (
"container/heap"
"math/big"
"sort"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/blockchain/action"
"github.com/iotexproject/iotex-core/proto"
)
type noncePriorityQueue []uint64
func (h noncePriorityQueue) Len() int { return len(h) }
func (h noncePriorityQueue) Less(i, j int) bool { return h[i] < h[j] }
func (h noncePriorityQueue) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *noncePriorityQueue) Push(x interface{}) {
in, ok := x.(uint64)
if !ok {
return
}
*h = append(*h, in)
}
func (h *noncePriorityQueue) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
// ActQueue is the interface of actQueue
type ActQueue interface {
Overlaps(*iproto.ActionPb) bool
Put(*iproto.ActionPb) error
FilterNonce(uint64) []*iproto.ActionPb
SetStartNonce(uint64)
StartNonce() uint64
UpdateQueue(uint64) []*iproto.ActionPb
SetPendingNonce(uint64)
PendingNonce() uint64
SetPendingBalance(*big.Int)
PendingBalance() *big.Int
Len() int
Empty() bool
PendingActs() []*iproto.ActionPb
AllActs() []*iproto.ActionPb
}
// actQueue is a queue of actions from an account
type actQueue struct {
// Map that stores all the actions belonging to an account associated with nonces
items map[uint64]*iproto.ActionPb
// Priority Queue that stores all the nonces belonging to an account. Nonces are used as indices for action map
index noncePriorityQueue
// Current nonce tracking the first action in queue
startNonce uint64
// Current pending nonce tracking previous actions that can be committed to the next block for the account
pendingNonce uint64
// Current pending balance for the account
pendingBalance *big.Int
}
// NewActQueue create a new action queue
func NewActQueue() ActQueue {
return &actQueue{
items: make(map[uint64]*iproto.ActionPb),
index: noncePriorityQueue{},
startNonce: uint64(1), // Taking coinbase Action into account, startNonce should start with 1
pendingNonce: uint64(1), // Taking coinbase Action into account, pendingNonce should start with 1
pendingBalance: big.NewInt(0),
}
}
// Overlaps returns whether the queue already contains an action with the same nonce as the given action
func (q *actQueue) Overlaps(act *iproto.ActionPb) bool {
var nonce uint64
switch {
case act.GetTransfer() != nil:
tsf := &action.Transfer{}
tsf.ConvertFromTransferPb(act.GetTransfer())
nonce = tsf.Nonce
case act.GetVote() != nil:
vote := &action.Vote{}
vote.ConvertFromVotePb(act.GetVote())
nonce = vote.Nonce
}
return q.items[nonce] != nil
}
// Put inserts a new action into the map, also updating the queue's nonce index
func (q *actQueue) Put(act *iproto.ActionPb) error {
var nonce uint64
switch {
case act.GetTransfer() != nil:
tsf := &action.Transfer{}
tsf.ConvertFromTransferPb(act.GetTransfer())
nonce = tsf.Nonce
case act.GetVote() != nil:
vote := &action.Vote{}
vote.ConvertFromVotePb(act.GetVote())
nonce = vote.Nonce
}
if q.items[nonce] != nil {
return errors.Wrapf(ErrNonce, "duplicate nonce")
}
heap.Push(&q.index, nonce)
q.items[nonce] = act
return nil
}
// FilterNonce removes all actions from the map with a nonce lower than the given threshold
func (q *actQueue) FilterNonce(threshold uint64) []*iproto.ActionPb {
var removed []*iproto.ActionPb
// Pop off priority queue and delete corresponding entries from map until the threshold is reached
for q.index.Len() > 0 && (q.index)[0] < threshold {
nonce := heap.Pop(&q.index).(uint64)
removed = append(removed, q.items[nonce])
delete(q.items, nonce)
}
return removed
}
// UpdateQueue updates the pending nonce and balance of the queue
func (q *actQueue) UpdateQueue(nonce uint64) []*iproto.ActionPb {
// First, starting from the current pending nonce, incrementally find the next pending nonce
// while updating pending balance if transfers are payable
for ; q.items[nonce] != nil; nonce++ {
if q.items[nonce].GetVote() != nil {
continue
}
tsf := &action.Transfer{}
tsf.ConvertFromTransferPb(q.items[nonce].GetTransfer())
if q.pendingBalance.Cmp(tsf.Amount) < 0 {
break
}
q.pendingBalance.Sub(q.pendingBalance, tsf.Amount)
}
q.pendingNonce = nonce
// Find the index of new pending nonce within the queue
sort.Sort(q.index)
i := 0
for ; i < q.index.Len(); i++ {
if q.index[i] >= nonce {
break
}
}
// Case I: An unpayable transfer has been found while updating pending nonce/balance
// Remove all the subsequent actions in the queue starting from the index of new pending nonce
if q.items[nonce] != nil {
return q.removeActs(i)
}
// Case II: All transfers are payable while updating pending nonce/balance
// Check all the subsequent actions in the queue starting from the index of new pending nonce
// Find the nonce index of the first unpayable transfer
// Remove all the subsequent actions in the queue starting from that index
for ; i < q.index.Len(); i++ {
nonce = q.index[i]
if transfer := q.items[nonce].GetTransfer(); transfer != nil {
tsf := &action.Transfer{}
tsf.ConvertFromTransferPb(transfer)
if q.pendingBalance.Cmp(tsf.Amount) < 0 {
break
}
}
}
return q.removeActs(i)
}
// SetStartNonce sets the new start nonce for the queue
func (q *actQueue) SetStartNonce(nonce uint64) {
q.startNonce = nonce
}
// StartNonce returns the current start nonce of the queue
func (q *actQueue) StartNonce() uint64 {
return q.startNonce
}
// SetPendingNonce sets pending nonce for the queue
func (q *actQueue) SetPendingNonce(nonce uint64) {
q.pendingNonce = nonce
}
// PendingNonce returns the current pending nonce of the queue
func (q *actQueue) PendingNonce() uint64 {
return q.pendingNonce
}
// SetPendingBalance sets pending balance for the queue
func (q *actQueue) SetPendingBalance(balance *big.Int) {
q.pendingBalance = balance
}
// PendingBalance returns the current pending balance of the queue
func (q *actQueue) PendingBalance() *big.Int {
return q.pendingBalance
}
// Len returns the length of the action map
func (q *actQueue) Len() int {
return len(q.items)
}
// Empty returns whether the queue of actions is empty or not
func (q *actQueue) Empty() bool {
return q.Len() == 0
}
// PendingActs creates a consecutive nonce-sorted slice of actions
func (q *actQueue) PendingActs() []*iproto.ActionPb {
if q.Len() == 0 {
return []*iproto.ActionPb{}
}
acts := make([]*iproto.ActionPb, 0, len(q.items))
nonce := q.index[0]
for ; q.items[nonce] != nil; nonce++ {
acts = append(acts, q.items[nonce])
}
return acts
}
// AllActs returns all the actions currently in queue
func (q *actQueue) AllActs() []*iproto.ActionPb {
acts := make([]*iproto.ActionPb, 0, len(q.items))
if q.Len() == 0 {
return acts
}
sort.Sort(q.index)
for _, nonce := range q.index {
acts = append(acts, q.items[nonce])
}
return acts
}
// removeActs removes all the actions starting at idx from queue
func (q *actQueue) removeActs(idx int) []*iproto.ActionPb {
removedFromQueue := make([]*iproto.ActionPb, 0)
for i := idx; i < q.index.Len(); i++ {
removedFromQueue = append(removedFromQueue, q.items[q.index[i]])
delete(q.items, q.index[i])
}
q.index = q.index[:idx]
heap.Init(&q.index)
return removedFromQueue
}
| 1 | 11,748 | This switch statement can be removed. Just return q.items[act.Nonce] != nil | iotexproject-iotex-core | go |
@@ -119,7 +119,7 @@ public class AvoidInstantiatingObjectsInLoopsRule extends AbstractJavaRule {
*/
n = n.getParent();
} else if (n.getParent() instanceof ASTForStatement && n.getParent().getNumChildren() > 1
- && n == n.getParent().getChild(1)) {
+ && n.equals(n.getParent().getChild(1))) {
// it is the second child of a ForStatement - which means
// we are dealing with a for-each construct
// In that case, we can ignore this allocation expression, as | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.java.rule.performance;
import java.util.Collection;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.java.ast.ASTAllocationExpression;
import net.sourceforge.pmd.lang.java.ast.ASTArgumentList;
import net.sourceforge.pmd.lang.java.ast.ASTBlock;
import net.sourceforge.pmd.lang.java.ast.ASTBlockStatement;
import net.sourceforge.pmd.lang.java.ast.ASTBreakStatement;
import net.sourceforge.pmd.lang.java.ast.ASTDoStatement;
import net.sourceforge.pmd.lang.java.ast.ASTForInit;
import net.sourceforge.pmd.lang.java.ast.ASTForStatement;
import net.sourceforge.pmd.lang.java.ast.ASTPrimaryExpression;
import net.sourceforge.pmd.lang.java.ast.ASTPrimarySuffix;
import net.sourceforge.pmd.lang.java.ast.ASTReturnStatement;
import net.sourceforge.pmd.lang.java.ast.ASTStatementExpression;
import net.sourceforge.pmd.lang.java.ast.ASTThrowStatement;
import net.sourceforge.pmd.lang.java.ast.ASTWhileStatement;
import net.sourceforge.pmd.lang.java.rule.AbstractJavaRule;
import net.sourceforge.pmd.lang.java.types.TypeTestUtil;
public class AvoidInstantiatingObjectsInLoopsRule extends AbstractJavaRule {
public AvoidInstantiatingObjectsInLoopsRule() {
addRuleChainVisit(ASTAllocationExpression.class);
}
/**
* This method is used to check whether the user instantiates variables
* which are not assigned to arrays/lists in loops.
* @param node This is the expression of part of java code to be checked.
* @param data This is the data to return.
* @return Object This returns the data passed in. If a violation is found, it is added to the data.
*/
@Override
public Object visit(ASTAllocationExpression node, Object data) {
if (notInsideLoop(node)) {
return data;
}
if (fourthParentNotThrow(node)
&& fourthParentNotReturn(node)
&& notArrayAssignment(node)
&& notCollectionAccess(node)
&& notBreakFollowing(node)) {
addViolation(data, node);
}
return data;
}
private boolean notArrayAssignment(ASTAllocationExpression node) {
if (node.getNthParent(4) instanceof ASTStatementExpression) {
ASTPrimaryExpression assignee = node.getNthParent(4).getFirstChildOfType(ASTPrimaryExpression.class);
ASTPrimarySuffix suffix = assignee.getFirstChildOfType(ASTPrimarySuffix.class);
return suffix == null || !suffix.isArrayDereference();
}
return true;
}
private boolean notCollectionAccess(ASTAllocationExpression node) {
if (node.getNthParent(4) instanceof ASTArgumentList && node.getNthParent(8) instanceof ASTStatementExpression) {
ASTStatementExpression statement = (ASTStatementExpression) node.getNthParent(8);
return !TypeTestUtil.isA(Collection.class, statement);
}
return true;
}
private boolean notBreakFollowing(ASTAllocationExpression node) {
ASTBlockStatement blockStatement = node.getFirstParentOfType(ASTBlockStatement.class);
if (blockStatement != null) {
ASTBlock block = blockStatement.getFirstParentOfType(ASTBlock.class);
if (block.getNumChildren() > blockStatement.getIndexInParent() + 1) {
ASTBlockStatement next = (ASTBlockStatement) block.getChild(blockStatement.getIndexInParent() + 1);
if (next.getNumChildren() == 1 && next.getChild(0).getNumChildren() == 1) {
return !(next.getChild(0).getChild(0) instanceof ASTBreakStatement);
}
}
}
return true;
}
/**
* This method is used to check whether this expression is part of a throw statement.
* @param node This is the expression of part of java code to be checked.
* @return boolean <code>true</code> if the fourth parent of the node is not a throw statement, <code>false</code> otherwise.
*/
private boolean fourthParentNotThrow(ASTAllocationExpression node) {
return !(node.getNthParent(4) instanceof ASTThrowStatement);
}
/**
* This method is used to check whether this expression is part of a return statement.
* @param node This is the expression of part of java code to be checked.
* @return boolean <code>true</code> if the fourth parent of the node is not a return statement, <code>false</code> otherwise.
*/
private boolean fourthParentNotReturn(ASTAllocationExpression node) {
return !(node.getNthParent(4) instanceof ASTReturnStatement);
}
/**
* This method is used to check whether this expression is not in a loop.
* @param node This is the expression of part of java code to be checked.
* @return boolean <code>false</code> if the given node is inside a loop, <code>true</code> otherwise
*/
private boolean notInsideLoop(ASTAllocationExpression node) {
Node n = node.getParent();
while (n != null) {
if (n instanceof ASTDoStatement || n instanceof ASTWhileStatement || n instanceof ASTForStatement) {
return false;
} else if (n instanceof ASTForInit) {
/*
* init part is not technically inside the loop. Skip parent
* ASTForStatement but continue higher up to detect nested loops
*/
n = n.getParent();
} else if (n.getParent() instanceof ASTForStatement && n.getParent().getNumChildren() > 1
&& n == n.getParent().getChild(1)) {
// it is the second child of a ForStatement - which means
// we are dealing with a for-each construct
// In that case, we can ignore this allocation expression, as
// the second child
// is the expression, over which to iterate.
// Skip this parent but continue higher up
// to detect nested loops
n = n.getParent();
}
n = n.getParent();
}
return true;
}
}
| 1 | 18,232 | I think `==` for nodes is more readable than equals. An equals calls looks like it could be recursing, because intuitively two nodes are equal if their subtree are the equal. But everywhere you replaced, we don't want to test whether the subtrees are structurally equal, we want to know whether they're the same. Only `==` captures this intent, using equals obscures this. Not to mention that equals may NPE and is in that sense less ergonomic. I think we should enhance the rule with a list of types for which `==` is correct. Edit: in the meantime i think we could suppress those new violations... | pmd-pmd | java |
@@ -29,6 +29,9 @@ namespace ResultsComparer
[Option("csv", HelpText = "Path to exported CSV results. Optional.")]
public FileInfo CsvPath { get; set; }
+ [Option('f', "filter", HelpText = "Filter the benchmarks by name using glob pattern(s). Optional.")]
+ public IEnumerable<string> Filters { get; set; }
+
[Usage(ApplicationAlias = "")]
public static IEnumerable<Example> Examples
{ | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System.Collections.Generic;
using System.IO;
using CommandLine;
using CommandLine.Text;
namespace ResultsComparer
{
public class CommandLineOptions
{
[Option("base", HelpText = "Path to the folder/file with base results.")]
public string BasePath { get; set; }
[Option("diff", HelpText = "Path to the folder/file with diff results.")]
public string DiffPath { get; set; }
[Option("threshold", Required = true, HelpText = "Threshold for Statistical Test. Examples: 5%, 10ms, 100ns, 1s.")]
public string StatisticalTestThreshold { get; set; }
[Option("noise", HelpText = "Noise threshold for Statistical Test. The difference for 1.0ns and 1.1ns is 10%, but it's just a noise. Examples: 0.5ns 1ns.", Default = "0.3ns" )]
public string NoiseThreshold { get; set; }
[Option("top", HelpText = "Filter the diff to top/bottom N results. Optional.")]
public int? TopCount { get; set; }
[Option("csv", HelpText = "Path to exported CSV results. Optional.")]
public FileInfo CsvPath { get; set; }
[Usage(ApplicationAlias = "")]
public static IEnumerable<Example> Examples
{
get
{
yield return new Example(@"Compare the results stored in 'C:\results\win' (base) vs 'C:\results\unix' (diff) using 5% threshold.",
new CommandLineOptions { BasePath = @"C:\results\win", DiffPath = @"C:\results\unix", StatisticalTestThreshold = "5%" });
yield return new Example(@"Compare the results stored in 'C:\results\win' (base) vs 'C:\results\unix' (diff) using 5% threshold and show only top/bottom 10 results.",
new CommandLineOptions { BasePath = @"C:\results\win", DiffPath = @"C:\results\unix", StatisticalTestThreshold = "5%", TopCount = 10 });
yield return new Example(@"Compare the results stored in 'C:\results\win' (base) vs 'C:\results\unix' (diff) using 5% threshold and 0.5ns noise filter.",
new CommandLineOptions { BasePath = @"C:\results\win", DiffPath = @"C:\results\unix", StatisticalTestThreshold = "5%", NoiseThreshold = "0.5ns" });
}
}
}
} | 1 | 9,688 | What's the scenario for passing multiple filters? | dotnet-performance | .cs |
@@ -201,9 +201,10 @@ bool Part::commitLogs(std::unique_ptr<LogIterator> iter) {
LogID lastId = -1;
TermID lastTerm = -1;
while (iter->valid()) {
- lastId = iter->logId();
- lastTerm = iter->logTerm();
- auto log = iter->logMsg();
+ auto logEntry = iter->logEntry();
+ lastId = std::get<0>(logEntry);
+ lastTerm = std::get<1>(logEntry);
+ auto log = std::get<3>(logEntry);
if (log.empty()) {
VLOG(3) << idStr_ << "Skip the heartbeat!";
++(*iter); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "kvstore/Part.h"
#include "kvstore/LogEncoder.h"
#include "base/NebulaKeyUtils.h"
DEFINE_int32(cluster_id, 0, "A unique id for each cluster");
namespace nebula {
namespace kvstore {
using raftex::AppendLogResult;
namespace {
ResultCode toResultCode(AppendLogResult res) {
switch (res) {
case AppendLogResult::SUCCEEDED:
return ResultCode::SUCCEEDED;
case AppendLogResult::E_NOT_A_LEADER:
return ResultCode::ERR_LEADER_CHANGED;
default:
return ResultCode::ERR_CONSENSUS_ERROR;
}
}
} // Anonymous namespace
Part::Part(GraphSpaceID spaceId,
PartitionID partId,
HostAddr localAddr,
const std::string& walPath,
KVEngine* engine,
std::shared_ptr<folly::IOThreadPoolExecutor> ioPool,
std::shared_ptr<thread::GenericThreadPool> workers,
std::shared_ptr<folly::Executor> handlers,
std::shared_ptr<raftex::SnapshotManager> snapshotMan)
: RaftPart(FLAGS_cluster_id,
spaceId,
partId,
localAddr,
walPath,
ioPool,
workers,
handlers,
snapshotMan)
, spaceId_(spaceId)
, partId_(partId)
, walPath_(walPath)
, engine_(engine) {
}
std::pair<LogID, TermID> Part::lastCommittedLogId() {
std::string val;
ResultCode res = engine_->get(NebulaKeyUtils::systemCommitKey(partId_), &val);
if (res != ResultCode::SUCCEEDED) {
LOG(ERROR) << "Cannot fetch the last committed log id from the storage engine";
return std::make_pair(0, 0);
}
CHECK_EQ(val.size(), sizeof(LogID) + sizeof(TermID));
LogID lastId;
memcpy(reinterpret_cast<void*>(&lastId), val.data(), sizeof(LogID));
TermID termId;
memcpy(reinterpret_cast<void*>(&termId), val.data() + sizeof(LogID), sizeof(TermID));
return std::make_pair(lastId, termId);
}
void Part::asyncPut(folly::StringPiece key, folly::StringPiece value, KVCallback cb) {
std::string log = encodeMultiValues(OP_PUT, key, value);
appendAsync(FLAGS_cluster_id, std::move(log))
.then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::asyncMultiPut(const std::vector<KV>& keyValues, KVCallback cb) {
std::string log = encodeMultiValues(OP_MULTI_PUT, keyValues);
appendAsync(FLAGS_cluster_id, std::move(log))
.then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::asyncRemove(folly::StringPiece key, KVCallback cb) {
std::string log = encodeSingleValue(OP_REMOVE, key);
appendAsync(FLAGS_cluster_id, std::move(log))
.then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::asyncMultiRemove(const std::vector<std::string>& keys, KVCallback cb) {
std::string log = encodeMultiValues(OP_MULTI_REMOVE, keys);
appendAsync(FLAGS_cluster_id, std::move(log))
.then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::asyncRemovePrefix(folly::StringPiece prefix, KVCallback cb) {
std::string log = encodeSingleValue(OP_REMOVE_PREFIX, prefix);
appendAsync(FLAGS_cluster_id, std::move(log))
.then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::asyncRemoveRange(folly::StringPiece start,
folly::StringPiece end,
KVCallback cb) {
std::string log = encodeMultiValues(OP_REMOVE_RANGE, start, end);
appendAsync(FLAGS_cluster_id, std::move(log))
.then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::sync(KVCallback cb) {
sendCommandAsync("")
.then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::asyncAtomicOp(raftex::AtomicOp op, KVCallback cb) {
atomicOpAsync(std::move(op)).then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::asyncAddLearner(const HostAddr& learner, KVCallback cb) {
std::string log = encodeHost(OP_ADD_LEARNER, learner);
sendCommandAsync(std::move(log))
.then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::asyncTransferLeader(const HostAddr& target, KVCallback cb) {
std::string log = encodeHost(OP_TRANS_LEADER, target);
sendCommandAsync(std::move(log))
.then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::asyncAddPeer(const HostAddr& peer, KVCallback cb) {
std::string log = encodeHost(OP_ADD_PEER, peer);
sendCommandAsync(std::move(log))
.then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::asyncRemovePeer(const HostAddr& peer, KVCallback cb) {
std::string log = encodeHost(OP_REMOVE_PEER, peer);
sendCommandAsync(std::move(log))
.then([callback = std::move(cb)] (AppendLogResult res) mutable {
callback(toResultCode(res));
});
}
void Part::onLostLeadership(TermID term) {
VLOG(1) << "Lost the leadership for the term " << term;
}
void Part::onElected(TermID term) {
VLOG(1) << "Being elected as the leader for the term " << term;
}
void Part::onDiscoverNewLeader(HostAddr nLeader) {
LOG(INFO) << idStr_ << "Find the new leader " << nLeader;
if (newLeaderCb_) {
newLeaderCb_(nLeader);
}
}
bool Part::commitLogs(std::unique_ptr<LogIterator> iter) {
auto batch = engine_->startBatchWrite();
LogID lastId = -1;
TermID lastTerm = -1;
while (iter->valid()) {
lastId = iter->logId();
lastTerm = iter->logTerm();
auto log = iter->logMsg();
if (log.empty()) {
VLOG(3) << idStr_ << "Skip the heartbeat!";
++(*iter);
continue;
}
DCHECK_GE(log.size(), sizeof(int64_t) + 1 + sizeof(uint32_t));
// Skip the timestamp (type of int64_t)
switch (log[sizeof(int64_t)]) {
case OP_PUT: {
auto pieces = decodeMultiValues(log);
DCHECK_EQ(2, pieces.size());
if (batch->put(pieces[0], pieces[1]) != ResultCode::SUCCEEDED) {
LOG(ERROR) << idStr_ << "Failed to call WriteBatch::put()";
return false;
}
break;
}
case OP_MULTI_PUT: {
auto kvs = decodeMultiValues(log);
// Make the number of values are an even number
DCHECK_EQ((kvs.size() + 1) / 2, kvs.size() / 2);
for (size_t i = 0; i < kvs.size(); i += 2) {
if (batch->put(kvs[i], kvs[i + 1]) != ResultCode::SUCCEEDED) {
LOG(ERROR) << idStr_ << "Failed to call WriteBatch::put()";
return false;
}
}
break;
}
case OP_REMOVE: {
auto key = decodeSingleValue(log);
if (batch->remove(key) != ResultCode::SUCCEEDED) {
LOG(ERROR) << idStr_ << "Failed to call WriteBatch::remove()";
return false;
}
break;
}
case OP_MULTI_REMOVE: {
auto keys = decodeMultiValues(log);
for (auto k : keys) {
if (batch->remove(k) != ResultCode::SUCCEEDED) {
LOG(ERROR) << idStr_ << "Failed to call WriteBatch::remove()";
return false;
}
}
break;
}
case OP_REMOVE_PREFIX: {
auto prefix = decodeSingleValue(log);
if (batch->removePrefix(prefix) != ResultCode::SUCCEEDED) {
LOG(ERROR) << idStr_ << "Failed to call WriteBatch::removePrefix()";
return false;
}
break;
}
case OP_REMOVE_RANGE: {
auto range = decodeMultiValues(log);
DCHECK_EQ(2, range.size());
if (batch->removeRange(range[0], range[1]) != ResultCode::SUCCEEDED) {
LOG(ERROR) << idStr_ << "Failed to call WriteBatch::removeRange()";
return false;
}
break;
}
case OP_ADD_PEER:
case OP_ADD_LEARNER: {
break;
}
case OP_TRANS_LEADER: {
auto newLeader = decodeHost(OP_TRANS_LEADER, log);
commitTransLeader(newLeader);
break;
}
case OP_REMOVE_PEER: {
auto peer = decodeHost(OP_REMOVE_PEER, log);
commitRemovePeer(peer);
break;
}
default: {
LOG(FATAL) << idStr_ << "Unknown operation: " << static_cast<uint8_t>(log[0]);
}
}
++(*iter);
}
if (lastId >= 0) {
if (putCommitMsg(batch.get(), lastId, lastTerm) != ResultCode::SUCCEEDED) {
LOG(ERROR) << idStr_ << "Commit msg failed";
return false;
}
}
return engine_->commitBatchWrite(std::move(batch)) == ResultCode::SUCCEEDED;
}
std::pair<int64_t, int64_t> Part::commitSnapshot(const std::vector<std::string>& rows,
LogID committedLogId,
TermID committedLogTerm,
bool finished) {
auto batch = engine_->startBatchWrite();
int64_t count = 0;
int64_t size = 0;
for (auto& row : rows) {
count++;
size += row.size();
auto kv = decodeKV(row);
if (ResultCode::SUCCEEDED != batch->put(kv.first, kv.second)) {
LOG(ERROR) << idStr_ << "Put failed in commit";
return std::make_pair(0, 0);
}
}
if (finished) {
if (ResultCode::SUCCEEDED != putCommitMsg(batch.get(), committedLogId, committedLogTerm)) {
LOG(ERROR) << idStr_ << "Put failed in commit";
return std::make_pair(0, 0);
}
}
if (ResultCode::SUCCEEDED != engine_->commitBatchWrite(std::move(batch))) {
LOG(ERROR) << idStr_ << "Put failed in commit";
return std::make_pair(0, 0);
}
return std::make_pair(count, size);
}
ResultCode Part::putCommitMsg(WriteBatch* batch, LogID committedLogId, TermID committedLogTerm) {
std::string commitMsg;
commitMsg.reserve(sizeof(LogID) + sizeof(TermID));
commitMsg.append(reinterpret_cast<char*>(&committedLogId), sizeof(LogID));
commitMsg.append(reinterpret_cast<char*>(&committedLogTerm), sizeof(TermID));
return batch->put(NebulaKeyUtils::systemCommitKey(partId_), commitMsg);
}
bool Part::preProcessLog(LogID logId,
TermID termId,
ClusterID clusterId,
const std::string& log) {
VLOG(3) << idStr_ << "logId " << logId
<< ", termId " << termId
<< ", clusterId " << clusterId;
if (!log.empty()) {
switch (log[sizeof(int64_t)]) {
case OP_ADD_LEARNER: {
auto learner = decodeHost(OP_ADD_LEARNER, log);
addLearner(learner);
break;
}
case OP_TRANS_LEADER: {
auto newLeader = decodeHost(OP_TRANS_LEADER, log);
preProcessTransLeader(newLeader);
break;
}
case OP_ADD_PEER: {
auto peer = decodeHost(OP_ADD_PEER, log);
addPeer(peer);
break;
}
case OP_REMOVE_PEER: {
auto peer = decodeHost(OP_REMOVE_PEER, log);
preProcessRemovePeer(peer);
break;
}
default: {
break;
}
}
}
return true;
}
} // namespace kvstore
} // namespace nebula
| 1 | 22,373 | move to after check log.empty() | vesoft-inc-nebula | cpp |
@@ -19,6 +19,7 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Comments and reviews for records: web interface """
+from docutils.nodes import note
__lastupdated__ = """$Date$"""
| 1 | # -*- coding: utf-8 -*-
# Comments and reviews for records.
# This file is part of Invenio.
# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Comments and reviews for records: web interface """
__lastupdated__ = """$Date$"""
__revision__ = """$Id$"""
import cgi
from invenio.webcomment import check_recID_is_in_range, \
perform_request_display_comments_or_remarks, \
perform_request_add_comment_or_remark, \
perform_request_vote, \
perform_request_report, \
subscribe_user_to_discussion, \
unsubscribe_user_from_discussion, \
get_user_subscription_to_discussion, \
check_user_can_attach_file_to_comments, \
check_user_can_view_comments, \
check_user_can_send_comments, \
check_user_can_view_comment, \
query_get_comment, \
toggle_comment_visibility, \
check_comment_belongs_to_record, \
is_comment_deleted, \
perform_display_your_comments
from invenio.config import \
CFG_TMPSHAREDDIR, \
CFG_SITE_LANG, \
CFG_SITE_URL, \
CFG_SITE_SECURE_URL, \
CFG_PREFIX, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL, \
CFG_WEBCOMMENT_ALLOW_COMMENTS,\
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBCOMMENT_USE_MATHJAX_IN_COMMENTS, \
CFG_SITE_RECORD, \
CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE, \
CFG_WEBCOMMENT_MAX_ATTACHED_FILES, \
CFG_ACCESS_CONTROL_LEVEL_SITE
from invenio.webuser import getUid, page_not_authorized, isGuestUser, collect_user_info
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.search_engine import create_navtrail_links, \
guess_primary_collection_of_a_record, \
get_colID
from invenio.urlutils import redirect_to_url, \
make_canonical_urlargd
from invenio.htmlutils import get_mathjax_header
from invenio.errorlib import register_exception
from invenio.messages import gettext_set_language
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.websearchadminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from invenio.access_control_config import VIEWRESTRCOLL
from invenio.access_control_mailcookie import \
mail_cookie_create_authorize_action, \
mail_cookie_create_common, \
mail_cookie_check_common, \
InvenioWebAccessMailCookieDeletedError, \
InvenioWebAccessMailCookieError
from invenio.webcomment_config import \
InvenioWebCommentError, \
InvenioWebCommentWarning
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
websearch_templates = invenio.template.load('websearch')
import os
from invenio import webinterface_handler_config as apache
from invenio.bibdocfile import \
stream_file, \
decompose_file, \
propose_next_docname
class WebInterfaceCommentsPages(WebInterfaceDirectory):
"""Defines the set of /comments pages."""
_exports = ['', 'display', 'add', 'vote', 'report', 'index', 'attachments',
'subscribe', 'unsubscribe', 'toggle']
def __init__(self, recid=-1, reviews=0):
self.recid = recid
self.discussion = reviews # 0:comments, 1:reviews
self.attachments = WebInterfaceCommentsFiles(recid, reviews)
def index(self, req, form):
"""
Redirects to display function
"""
return self.display(req, form)
def display(self, req, form):
"""
Display comments (reviews if enabled) associated with record having id recid where recid>0.
This function can also be used to display remarks associated with basket having id recid where recid<-99.
@param ln: language
@param recid: record id, integer
@param do: display order hh = highest helpful score, review only
lh = lowest helpful score, review only
hs = highest star score, review only
ls = lowest star score, review only
od = oldest date
nd = newest date
@param ds: display since all= no filtering by date
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
where n is a single digit integer between 0 and 9
@param nb: number of results per page
@param p: results page
@param voted: boolean, active if user voted for a review, see vote function
@param reported: int, active if user reported a certain comment/review, see report function
@param reviews: boolean, enabled for reviews, disabled for comments
@param subscribed: int, 1 if user just subscribed to discussion, -1 if unsubscribed
@return the full html page.
"""
argd = wash_urlargd(form, {'do': (str, "od"),
'ds': (str, "all"),
'nb': (int, 100),
'p': (int, 1),
'voted': (int, -1),
'reported': (int, -1),
'subscribed': (int, 0),
'cmtgrp': (list, ["latest"]) # 'latest' is now a reserved group/round name
})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
user_info = collect_user_info(req)
(auth_code, auth_msg) = check_user_can_view_comments(user_info, self.recid)
if auth_code and user_info['email'] == 'guest':
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
elif auth_code:
return page_not_authorized(req, "../", \
text = auth_msg)
can_send_comments = False
(auth_code, auth_msg) = check_user_can_send_comments(user_info, self.recid)
if not auth_code:
can_send_comments = True
can_attach_files = False
(auth_code, auth_msg) = check_user_can_attach_file_to_comments(user_info, self.recid)
if not auth_code and (user_info['email'] != 'guest'):
can_attach_files = True
subscription = get_user_subscription_to_discussion(self.recid, uid)
if subscription == 1:
user_is_subscribed_to_discussion = True
user_can_unsubscribe_from_discussion = True
elif subscription == 2:
user_is_subscribed_to_discussion = True
user_can_unsubscribe_from_discussion = False
else:
user_is_subscribed_to_discussion = False
user_can_unsubscribe_from_discussion = False
unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(self.recid)),
self.recid,
ln=argd['ln'])
ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
ordered_tabs_id.sort(lambda x, y: cmp(x[1], y[1]))
link_ln = ''
if argd['ln'] != CFG_SITE_LANG:
link_ln = '?ln=%s' % argd['ln']
tabs = [(unordered_tabs[tab_id]['label'], \
'%s/record/%s/%s%s' % (CFG_SITE_URL, self.recid, tab_id, link_ln), \
tab_id in ['comments', 'reviews'],
unordered_tabs[tab_id]['enabled']) \
for (tab_id, order) in ordered_tabs_id
if unordered_tabs[tab_id]['visible'] == True]
tabs_counts = get_detailed_page_tabs_counts(self.recid)
citedbynum = tabs_counts['Citations']
references = tabs_counts['References']
discussions = tabs_counts['Discussions']
top = webstyle_templates.detailed_record_container_top(self.recid,
tabs,
argd['ln'],
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions)
bottom = webstyle_templates.detailed_record_container_bottom(self.recid,
tabs,
argd['ln'])
#display_comment_rounds = [cmtgrp for cmtgrp in argd['cmtgrp'] if cmtgrp.isdigit() or cmtgrp == "all" or cmtgrp == "-1"]
display_comment_rounds = argd['cmtgrp']
check_warnings = []
(ok, problem) = check_recID_is_in_range(self.recid, check_warnings, argd['ln'])
if ok:
body = perform_request_display_comments_or_remarks(req=req, recID=self.recid,
display_order=argd['do'],
display_since=argd['ds'],
nb_per_page=argd['nb'],
page=argd['p'],
ln=argd['ln'],
voted=argd['voted'],
reported=argd['reported'],
subscribed=argd['subscribed'],
reviews=self.discussion,
uid=uid,
can_send_comments=can_send_comments,
can_attach_files=can_attach_files,
user_is_subscribed_to_discussion=user_is_subscribed_to_discussion,
user_can_unsubscribe_from_discussion=user_can_unsubscribe_from_discussion,
display_comment_rounds=display_comment_rounds
)
title, description, keywords = websearch_templates.tmpl_record_page_header_content(req, self.recid, argd['ln'])
navtrail = create_navtrail_links(cc=guess_primary_collection_of_a_record(self.recid), ln=argd['ln'])
if navtrail:
navtrail += ' > '
navtrail += '<a class="navtrail" href="%s/%s/%s?ln=%s">'% (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, argd['ln'])
navtrail += cgi.escape(title)
navtrail += '</a>'
navtrail += ' > <a class="navtrail">%s</a>' % (self.discussion==1 and _("Reviews") or _("Comments"))
mathjaxheader = ''
if CFG_WEBCOMMENT_USE_MATHJAX_IN_COMMENTS:
mathjaxheader = get_mathjax_header(req.is_https())
jqueryheader = '''
<script src="%(CFG_SITE_URL)s/js/jquery.MultiFile.pack.js" type="text/javascript" language="javascript"></script>
''' % {'CFG_SITE_URL': CFG_SITE_URL}
return pageheaderonly(title=title,
navtrail=navtrail,
uid=uid,
verbose=1,
metaheaderadd = mathjaxheader + jqueryheader,
req=req,
language=argd['ln'],
navmenuid='search',
navtrail_append_title_p=0) + \
websearch_templates.tmpl_search_pagestart(argd['ln']) + \
top + body + bottom + \
websearch_templates.tmpl_search_pageend(argd['ln']) + \
pagefooteronly(lastupdated=__lastupdated__, language=argd['ln'], req=req)
else:
return page(title=_("Record Not Found"),
body=problem,
uid=uid,
verbose=1,
req=req,
language=argd['ln'],
navmenuid='search')
# Return the same page wether we ask for /CFG_SITE_RECORD/123 or /CFG_SITE_RECORD/123/
__call__ = index
def add(self, req, form):
"""
Add a comment (review) to record with id recid where recid>0
Also works for adding a remark to basket with id recid where recid<-99
@param ln: languange
@param recid: record id
@param action: 'DISPLAY' to display add form
'SUBMIT' to submit comment once form is filled
'REPLY' to reply to an already existing comment
@param msg: the body of the comment/review or remark
@param score: star score of the review
@param note: title of the review
@param comid: comment id, needed for replying
@param editor_type: the type of editor used for submitting the
comment: 'textarea', 'ckeditor'.
@param subscribe: if set, subscribe user to receive email
notifications when new comment are added to
this discussion
@return the full html page.
"""
argd = wash_urlargd(form, {'action': (str, "DISPLAY"),
'msg': (str, ""),
'note': (str, ''),
'score': (int, 0),
'comid': (int, 0),
'editor_type': (str, ""),
'subscribe': (str, ""),
'cookie': (str, "")
})
_ = gettext_set_language(argd['ln'])
actions = ['DISPLAY', 'REPLY', 'SUBMIT']
uid = getUid(req)
# Is site ready to accept comments?
if uid == -1 or (not CFG_WEBCOMMENT_ALLOW_COMMENTS and not CFG_WEBCOMMENT_ALLOW_REVIEWS):
return page_not_authorized(req, "../comments/add",
navmenuid='search')
# Is user allowed to post comment?
user_info = collect_user_info(req)
(auth_code_1, auth_msg_1) = check_user_can_view_comments(user_info, self.recid)
(auth_code_2, auth_msg_2) = check_user_can_send_comments(user_info, self.recid)
if isGuestUser(uid):
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
# Save user's value in cookie, so that these "POST"
# parameters are not lost during login process
msg_cookie = mail_cookie_create_common('comment_msg',
{'msg': argd['msg'],
'note': argd['note'],
'score': argd['score'],
'editor_type': argd['editor_type'],
'subscribe': argd['subscribe']},
onetime=True)
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri'] + '&cookie=' + msg_cookie}, {})
return redirect_to_url(req, target, norobot=True)
elif (auth_code_1 or auth_code_2):
return page_not_authorized(req, "../", \
text = auth_msg_1 + auth_msg_2)
if argd['comid']:
# If replying to a comment, are we on a record that
# matches the original comment user is replying to?
if not check_comment_belongs_to_record(argd['comid'], self.recid):
return page_not_authorized(req, "../", \
text = _("Specified comment does not belong to this record"))
# Is user trying to reply to a restricted comment? Make
# sure user has access to it. We will then inherit its
# restriction for the new comment
(auth_code, auth_msg) = check_user_can_view_comment(user_info, argd['comid'])
if auth_code:
return page_not_authorized(req, "../", \
text = _("You do not have access to the specified comment"))
# Is user trying to reply to a deleted comment? If so, we
# let submitted comment go (to not lose possibly submitted
# content, if comment is submitted while original is
# deleted), but we "reset" comid to make sure that for
# action 'REPLY' the original comment is not included in
# the reply
if is_comment_deleted(argd['comid']):
argd['comid'] = 0
user_info = collect_user_info(req)
can_attach_files = False
(auth_code, auth_msg) = check_user_can_attach_file_to_comments(user_info, self.recid)
if not auth_code and (user_info['email'] != 'guest'):
can_attach_files = True
warning_msgs = [] # list of warning tuples (warning_text, warning_color)
added_files = {}
if can_attach_files:
# User is allowed to attach files. Process the files
file_too_big = False
formfields = form.get('commentattachment[]', [])
if not hasattr(formfields, "__getitem__"): # A single file was uploaded
formfields = [formfields]
for formfield in formfields[:CFG_WEBCOMMENT_MAX_ATTACHED_FILES]:
if hasattr(formfield, "filename") and formfield.filename:
filename = formfield.filename
dir_to_open = os.path.join(CFG_TMPSHAREDDIR, 'webcomment', str(uid))
try:
assert(dir_to_open.startswith(CFG_TMPSHAREDDIR))
except AssertionError:
register_exception(req=req,
prefix='User #%s tried to upload file to forbidden location: %s' \
% (uid, dir_to_open))
if not os.path.exists(dir_to_open):
try:
os.makedirs(dir_to_open)
except:
register_exception(req=req, alert_admin=True)
## Before saving the file to disc, wash the filename (in particular
## washing away UNIX and Windows (e.g. DFS) paths):
filename = os.path.basename(filename.split('\\')[-1])
filename = filename.strip()
if filename != "":
# Check that file does not already exist
n = 1
while os.path.exists(os.path.join(dir_to_open, filename)):
basedir, name, extension = decompose_file(filename)
new_name = propose_next_docname(name)
filename = new_name + extension
fp = open(os.path.join(dir_to_open, filename), "w")
# FIXME: temporary, waiting for wsgi handler to be
# fixed. Once done, read chunk by chunk
# while formfield.file:
# fp.write(formfield.file.read(10240))
fp.write(formfield.file.read())
fp.close()
# Isn't this file too big?
file_size = os.path.getsize(os.path.join(dir_to_open, filename))
if CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE > 0 and \
file_size > CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE:
os.remove(os.path.join(dir_to_open, filename))
# One file is too big: record that,
# dismiss all uploaded files and re-ask to
# upload again
file_too_big = True
try:
raise InvenioWebCommentWarning(_('The size of file \\"%s\\" (%s) is larger than maximum allowed file size (%s). Select files again.') % (cgi.escape(filename), str(file_size/1024) + 'KB', str(CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE/1024) + 'KB'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning')
warning_msgs.append((exc.message, ''))
#warning_msgs.append(('WRN_WEBCOMMENT_MAX_FILE_SIZE_REACHED', cgi.escape(filename), str(file_size/1024) + 'KB', str(CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE/1024) + 'KB'))
else:
added_files[filename] = os.path.join(dir_to_open, filename)
if file_too_big:
# One file was too big. Removed all uploaded filed
for filepath in added_files.items():
try:
os.remove(filepath)
except:
# File was already removed or does not exist?
pass
client_ip_address = req.remote_ip
check_warnings = []
(ok, problem) = check_recID_is_in_range(self.recid, check_warnings, argd['ln'])
if ok:
title, description, keywords = websearch_templates.tmpl_record_page_header_content(req,
self.recid,
argd['ln'])
navtrail = create_navtrail_links(cc=guess_primary_collection_of_a_record(self.recid))
if navtrail:
navtrail += ' > '
navtrail += '<a class="navtrail" href="%s/%s/%s?ln=%s">'% (CFG_SITE_URL, CFG_SITE_RECORD, self.recid, argd['ln'])
navtrail += cgi.escape(title)
navtrail += '</a>'
navtrail += '> <a class="navtrail" href="%s/%s/%s/%s/?ln=%s">%s</a>' % (CFG_SITE_URL,
CFG_SITE_RECORD,
self.recid,
self.discussion==1 and 'reviews' or 'comments',
argd['ln'],
self.discussion==1 and _('Reviews') or _('Comments'))
if argd['action'] not in actions:
argd['action'] = 'DISPLAY'
if not argd['msg']:
# User had to login in-between, so retrieve msg
# from cookie
try:
(kind, cookie_argd) = mail_cookie_check_common(argd['cookie'],
delete=True)
argd.update(cookie_argd)
except InvenioWebAccessMailCookieDeletedError, e:
return redirect_to_url(req, CFG_SITE_SECURE_URL + '/'+ CFG_SITE_RECORD +'/' + \
str(self.recid) + (self.discussion==1 and \
'/reviews' or '/comments'))
except InvenioWebAccessMailCookieError, e:
# Invalid or empty cookie: continue
pass
subscribe = False
if argd['subscribe'] and \
get_user_subscription_to_discussion(self.recid, uid) == 0:
# User is not already subscribed, and asked to subscribe
subscribe = True
body = perform_request_add_comment_or_remark(recID=self.recid,
ln=argd['ln'],
uid=uid,
action=argd['action'],
msg=argd['msg'],
note=argd['note'],
score=argd['score'],
reviews=self.discussion,
comID=argd['comid'],
client_ip_address=client_ip_address,
editor_type=argd['editor_type'],
can_attach_files=can_attach_files,
subscribe=subscribe,
req=req,
attached_files=added_files,
warnings=warning_msgs)
if self.discussion:
title = _("Add Review")
else:
title = _("Add Comment")
jqueryheader = '''
<script src="%(CFG_SITE_URL)s/js/jquery.MultiFile.pack.js" type="text/javascript" language="javascript"></script>
''' % {'CFG_SITE_URL': CFG_SITE_URL}
return page(title=title,
body=body,
navtrail=navtrail,
uid=uid,
language=CFG_SITE_LANG,
verbose=1,
req=req,
navmenuid='search',
metaheaderadd=jqueryheader)
# id not in range
else:
return page(title=_("Record Not Found"),
body=problem,
uid=uid,
verbose=1,
req=req,
navmenuid='search')
def vote(self, req, form):
"""
Vote positively or negatively for a comment/review.
@param comid: comment/review id
@param com_value: +1 to vote positively
-1 to vote negatively
@param recid: the id of the record the comment/review is associated with
@param ln: language
@param do: display order hh = highest helpful score, review only
lh = lowest helpful score, review only
hs = highest star score, review only
ls = lowest star score, review only
od = oldest date
nd = newest date
@param ds: display since all= no filtering by date
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
where n is a single digit integer between 0 and 9
@param nb: number of results per page
@param p: results page
@param referer: http address of the calling function to redirect to (refresh)
@param reviews: boolean, enabled for reviews, disabled for comments
"""
argd = wash_urlargd(form, {'comid': (int, -1),
'com_value': (int, 0),
'recid': (int, -1),
'do': (str, "od"),
'ds': (str, "all"),
'nb': (int, 100),
'p': (int, 1),
'referer': (str, None)
})
_ = gettext_set_language(argd['ln'])
client_ip_address = req.remote_ip
uid = getUid(req)
user_info = collect_user_info(req)
(auth_code, auth_msg) = check_user_can_view_comments(user_info, self.recid)
if auth_code and user_info['email'] == 'guest':
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
elif auth_code:
return page_not_authorized(req, "../", \
text = auth_msg)
# Check that comment belongs to this recid
if not check_comment_belongs_to_record(argd['comid'], self.recid):
return page_not_authorized(req, "../", \
text = _("Specified comment does not belong to this record"))
# Check that user can access the record
(auth_code, auth_msg) = check_user_can_view_comment(user_info, argd['comid'])
if auth_code:
return page_not_authorized(req, "../", \
text = _("You do not have access to the specified comment"))
# Check that comment is not currently deleted
if is_comment_deleted(argd['comid']):
return page_not_authorized(req, "../", \
text = _("You cannot vote for a deleted comment"),
ln=argd['ln'])
success = perform_request_vote(argd['comid'], client_ip_address, argd['com_value'], uid)
if argd['referer']:
argd['referer'] += "?ln=%s&do=%s&ds=%s&nb=%s&p=%s&voted=%s&" % (
argd['ln'], argd['do'], argd['ds'], argd['nb'], argd['p'], success)
redirect_to_url(req, argd['referer'])
else:
#Note: sent to comments display
referer = "%s/%s/%s/%s?&ln=%s&voted=1"
referer %= (CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, self.discussion == 1 and 'reviews' or 'comments', argd['ln'])
redirect_to_url(req, referer)
def report(self, req, form):
"""
Report a comment/review for inappropriate content
@param comid: comment/review id
@param recid: the id of the record the comment/review is associated with
@param ln: language
@param do: display order hh = highest helpful score, review only
lh = lowest helpful score, review only
hs = highest star score, review only
ls = lowest star score, review only
od = oldest date
nd = newest date
@param ds: display since all= no filtering by date
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
where n is a single digit integer between 0 and 9
@param nb: number of results per page
@param p: results page
@param referer: http address of the calling function to redirect to (refresh)
@param reviews: boolean, enabled for reviews, disabled for comments
"""
argd = wash_urlargd(form, {'comid': (int, -1),
'recid': (int, -1),
'do': (str, "od"),
'ds': (str, "all"),
'nb': (int, 100),
'p': (int, 1),
'referer': (str, None)
})
_ = gettext_set_language(argd['ln'])
client_ip_address = req.remote_ip
uid = getUid(req)
user_info = collect_user_info(req)
(auth_code, auth_msg) = check_user_can_view_comments(user_info, self.recid)
if isGuestUser(uid):
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
elif auth_code:
return page_not_authorized(req, "../", \
text = auth_msg)
# Check that comment belongs to this recid
if not check_comment_belongs_to_record(argd['comid'], self.recid):
return page_not_authorized(req, "../", \
text = _("Specified comment does not belong to this record"))
# Check that user can access the record
(auth_code, auth_msg) = check_user_can_view_comment(user_info, argd['comid'])
if auth_code:
return page_not_authorized(req, "../", \
text = _("You do not have access to the specified comment"))
# Check that comment is not currently deleted
if is_comment_deleted(argd['comid']):
return page_not_authorized(req, "../", \
text = _("You cannot report a deleted comment"),
ln=argd['ln'])
success = perform_request_report(argd['comid'], client_ip_address, uid)
if argd['referer']:
argd['referer'] += "?ln=%s&do=%s&ds=%s&nb=%s&p=%s&reported=%s&" % (argd['ln'], argd['do'], argd['ds'], argd['nb'], argd['p'], str(success))
redirect_to_url(req, argd['referer'])
else:
#Note: sent to comments display
referer = "%s/%s/%s/%s/display?ln=%s&voted=1"
referer %= (CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, self.discussion==1 and 'reviews' or 'comments', argd['ln'])
redirect_to_url(req, referer)
def subscribe(self, req, form):
"""
Subscribe current user to receive email notification when new
comments are added to current discussion.
"""
argd = wash_urlargd(form, {'referer': (str, None)})
uid = getUid(req)
user_info = collect_user_info(req)
(auth_code, auth_msg) = check_user_can_view_comments(user_info, self.recid)
if isGuestUser(uid):
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
elif auth_code:
return page_not_authorized(req, "../", \
text = auth_msg)
success = subscribe_user_to_discussion(self.recid, uid)
display_url = "%s/%s/%s/comments/display?subscribed=%s&ln=%s" % \
(CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, str(success), argd['ln'])
redirect_to_url(req, display_url)
def unsubscribe(self, req, form):
"""
Unsubscribe current user from current discussion.
"""
argd = wash_urlargd(form, {'referer': (str, None)})
user_info = collect_user_info(req)
uid = getUid(req)
if isGuestUser(uid):
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
success = unsubscribe_user_from_discussion(self.recid, uid)
display_url = "%s/%s/%s/comments/display?subscribed=%s&ln=%s" % \
(CFG_SITE_SECURE_URL, CFG_SITE_RECORD, self.recid, str(-success), argd['ln'])
redirect_to_url(req, display_url)
def toggle(self, req, form):
"""
Store the visibility of a comment for current user
"""
argd = wash_urlargd(form, {'comid': (int, -1),
'referer': (str, None),
'collapse': (int, 1)})
uid = getUid(req)
if isGuestUser(uid):
# We do not store information for guests
return ''
toggle_comment_visibility(uid, argd['comid'], argd['collapse'], self.recid)
if argd['referer']:
return redirect_to_url(req, CFG_SITE_SECURE_URL + \
(not argd['referer'].startswith('/') and '/' or '') + \
argd['referer'] + '#' + str(argd['comid']))
class WebInterfaceCommentsFiles(WebInterfaceDirectory):
"""Handle <strike>upload and </strike> access to files for comments.
<strike>The upload is currently only available through the Ckeditor.</strike>
"""
#_exports = ['put'] # 'get' is handled by _lookup(..)
def __init__(self, recid=-1, reviews=0):
self.recid = recid
self.discussion = reviews # 0:comments, 1:reviews
def _lookup(self, component, path):
""" This handler is invoked for the dynamic URLs (for getting
<strike>and putting attachments</strike>) Eg:
CFG_SITE_URL/CFG_SITE_RECORD/5953/comments/attachments/get/652/myfile.pdf
"""
if component == 'get' and len(path) > 1:
comid = path[0] # comment ID
file_name = '/'.join(path[1:]) # the filename
def answer_get(req, form):
"""Accessing files attached to comments."""
form['file'] = file_name
form['comid'] = comid
return self._get(req, form)
return answer_get, []
# All other cases: file not found
return None, []
def _get(self, req, form):
"""
Returns a file attached to a comment.
Example:
CFG_SITE_URL/CFG_SITE_RECORD/5953/comments/attachments/get/652/myfile.pdf
where 652 is the comment ID
"""
argd = wash_urlargd(form, {'file': (str, None),
'comid': (int, 0)})
_ = gettext_set_language(argd['ln'])
# Can user view this record, i.e. can user access its
# attachments?
uid = getUid(req)
user_info = collect_user_info(req)
# Check that user can view record, and its comments (protected
# with action "viewcomment")
(auth_code, auth_msg) = check_user_can_view_comments(user_info, self.recid)
if auth_code and user_info['email'] == 'guest':
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
elif auth_code:
return page_not_authorized(req, "../", \
text = auth_msg)
# Does comment exist?
if not query_get_comment(argd['comid']):
req.status = apache.HTTP_NOT_FOUND
return page(title=_("Page Not Found"),
body=_('The requested comment could not be found'),
req=req)
# Check that user can view this particular comment, protected
# using its own restriction
(auth_code, auth_msg) = check_user_can_view_comment(user_info, argd['comid'])
if auth_code and user_info['email'] == 'guest':
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : argd['ln'], 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri']}, {})
return redirect_to_url(req, target)
elif auth_code:
return page_not_authorized(req, "../", \
text = auth_msg,
ln=argd['ln'])
# Check that comment is not currently deleted
if is_comment_deleted(argd['comid']):
return page_not_authorized(req, "../", \
text = _("You cannot access files of a deleted comment"),
ln=argd['ln'])
if not argd['file'] is None:
# Prepare path to file on disk. Normalize the path so that
# ../ and other dangerous components are removed.
path = os.path.abspath(CFG_PREFIX + '/var/data/comments/' + \
str(self.recid) + '/' + str(argd['comid']) + \
'/' + argd['file'])
# Check that we are really accessing attachements
# directory, for the declared record.
if path.startswith(CFG_PREFIX + '/var/data/comments/' + \
str(self.recid)) and \
os.path.exists(path):
return stream_file(req, path)
# Send error 404 in all other cases
req.status = apache.HTTP_NOT_FOUND
return page(title=_("Page Not Found"),
body=_('The requested file could not be found'),
req=req,
language=argd['ln'])
class WebInterfaceYourCommentsPages(WebInterfaceDirectory):
"""Defines the set of /yourcomments pages."""
_exports = ['', ]
def index(self, req, form):
"""Index page."""
argd = wash_urlargd(form, {'page': (int, 1),
'format': (str, "rc"),
'order_by': (str, "lcf"),
'per_page': (str, "all"),
})
# TODO: support also "reviews", by adding new option to show/hide them if needed
uid = getUid(req)
# load the right language
_ = gettext_set_language(argd['ln'])
# Is site ready to accept comments?
if not CFG_WEBCOMMENT_ALLOW_COMMENTS or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "%s/yourcomments" % \
(CFG_SITE_SECURE_URL,),
text="Comments are currently disabled on this site",
navmenuid="yourcomments")
elif uid == -1 or isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourcomments%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_sendcomments']:
# Maybe we should still authorize if user submitted
# comments in the past?
return page_not_authorized(req, "../", \
text = _("You are not authorized to use comments."))
return page(title=_("Your Comments"),
body=perform_display_your_comments(user_info,
page_number=argd['page'],
selected_order_by_option=argd['order_by'],
selected_display_number_option=argd['per_page'],
selected_display_format_option=argd['format'],
ln=argd['ln']),
navtrail= """<a class="navtrail" href="%(sitesecureurl)s/youraccount/display?ln=%(ln)s">%(account)s</a>""" % {
'sitesecureurl' : CFG_SITE_SECURE_URL,
'ln': argd['ln'],
'account' : _("Your Account"),
},
description=_("%s View your previously submitted comments") % CFG_SITE_NAME_INTL.get(argd['ln'], CFG_SITE_NAME),
keywords=_("%s, personalize") % CFG_SITE_NAME_INTL.get(argd['ln'], CFG_SITE_NAME),
uid=uid,
language=argd['ln'],
req=req,
lastupdated=__lastupdated__,
navmenuid='youralerts',
secure_page_p=1)
# Return the same page wether we ask for /CFG_SITE_RECORD/123 or /CFG_SITE_RECORD/123/
__call__ = index
| 1 | 15,156 | This line needs to be removed | inveniosoftware-invenio | py |
@@ -68,7 +68,7 @@ type CloudBuildSourceSpec struct {
const (
// CloudBuildSource CloudEvent type
- CloudBuildSourceEvent = "com.google.cloud.build.event"
+ CloudBuildSourceEvent = "google.cloud.cloudbuild.build.v1.statusChanged"
// CloudBuildSourceBuildId is the Pub/Sub message attribute key with the CloudBuildSource's buildId.
CloudBuildSourceBuildId = "buildId"
// CloudBuildSourceBuildStatus is the Pub/Sub message attribute key with the CloudBuildSource's build status. | 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
duckv1alpha1 "github.com/google/knative-gcp/pkg/apis/duck/v1alpha1"
kngcpduck "github.com/google/knative-gcp/pkg/duck/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/kmeta"
"knative.dev/pkg/webhook/resourcesemantics"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/apis"
)
// CloudBuildSource is a specification for a CloudBuildSource resource.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type CloudBuildSource struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CloudBuildSourceSpec `json:"spec,omitempty"`
Status CloudBuildSourceStatus `json:"status,omitempty"`
}
// Verify that CloudBuildSource matches various duck types.
var (
_ apis.Convertible = (*CloudBuildSource)(nil)
_ apis.Defaultable = (*CloudBuildSource)(nil)
_ apis.Validatable = (*CloudBuildSource)(nil)
_ runtime.Object = (*CloudBuildSource)(nil)
_ kmeta.OwnerRefable = (*CloudBuildSource)(nil)
_ resourcesemantics.GenericCRD = (*CloudBuildSource)(nil)
_ kngcpduck.Identifiable = (*CloudBuildSource)(nil)
_ kngcpduck.PubSubable = (*CloudBuildSource)(nil)
)
// CloudBuildSourceSpec defines the desired state of the CloudBuildSource.
type CloudBuildSourceSpec struct {
// This brings in the PubSub based Source Specs. Includes:
// Sink, CloudEventOverrides, Secret and Project
duckv1alpha1.PubSubSpec `json:",inline"`
// Topic is the ID of the PubSub Topic to Subscribe to. It must
// be in the form of the unique identifier within the project, not the
// entire name. E.g. it must be 'laconia', not
// 'projects/my-proj/topics/laconia'.
// It is optional. Defaults to 'cloud-builds' and the topic must be 'cloud-builds'
// +optional
Topic *string `json:"topic,omitempty"`
}
const (
// CloudBuildSource CloudEvent type
CloudBuildSourceEvent = "com.google.cloud.build.event"
// CloudBuildSourceBuildId is the Pub/Sub message attribute key with the CloudBuildSource's buildId.
CloudBuildSourceBuildId = "buildId"
// CloudBuildSourceBuildStatus is the Pub/Sub message attribute key with the CloudBuildSource's build status.
CloudBuildSourceBuildStatus = "status"
)
// CloudBuildSourceEventSource returns the Cloud Build CloudEvent source value.
func CloudBuildSourceEventSource(googleCloudProject, buildId string) string {
return fmt.Sprintf("//cloudbuild.googleapis.com/projects/%s/builds/%s", googleCloudProject, buildId)
}
const (
// CloudBuildSourceConditionReady has status True when the CloudBuildSource is
// ready to send events.
CloudBuildSourceConditionReady = apis.ConditionReady
)
var buildCondSet = apis.NewLivingConditionSet(
duckv1alpha1.PullSubscriptionReady,
)
// CloudBuildSourceStatus defines the observed state of CloudBuildSource.
type CloudBuildSourceStatus struct {
duckv1alpha1.PubSubStatus `json:",inline"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CloudBuildSourceList contains a list of CloudBuildSources.
type CloudBuildSourceList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []CloudBuildSource `json:"items"`
}
// Methods for pubsubable interface
func (*CloudBuildSource) GetGroupVersionKind() schema.GroupVersionKind {
return SchemeGroupVersion.WithKind("CloudBuildSource")
}
// Methods for identifiable interface.
// IdentitySpec returns the IdentitySpec portion of the Spec.
func (s *CloudBuildSource) IdentitySpec() *duckv1alpha1.IdentitySpec {
return &s.Spec.IdentitySpec
}
// IdentityStatus returns the IdentityStatus portion of the Status.
func (s *CloudBuildSource) IdentityStatus() *duckv1alpha1.IdentityStatus {
return &s.Status.IdentityStatus
}
// CloudBuildSourceSpec returns the CloudBuildSourceSpec portion of the Spec.
func (bs *CloudBuildSource) PubSubSpec() *duckv1alpha1.PubSubSpec {
return &bs.Spec.PubSubSpec
}
// PubSubStatus returns the PubSubStatus portion of the Status.
func (bs *CloudBuildSource) PubSubStatus() *duckv1alpha1.PubSubStatus {
return &bs.Status.PubSubStatus
}
// ConditionSet returns the apis.ConditionSet of the embedding object.
func (bs *CloudBuildSource) ConditionSet() *apis.ConditionSet {
return &buildCondSet
}
| 1 | 17,453 | Can we delete such const like `CloudBuildSourceEvent` and `CloudBuildSourceBuildId` since all those are contained under schemas/v1? | google-knative-gcp | go |
@@ -0,0 +1,8 @@
+// Package vsphere contains vSphere-specific structures for installer
+// configuration and management.
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/hive/pkg/apis/hive
+package vsphere
+
+// Name is name for the vsphere platform.
+const Name string = "vsphere" | 1 | 1 | 12,188 | This is unused. | openshift-hive | go |
|
@@ -32,9 +32,13 @@ namespace OpenTelemetry.Instrumentation.Http
public bool SetHttpFlavor { get; set; }
/// <summary>
- /// Gets or sets <see cref="ITextFormat"/> for context propagation. Default value: <see cref="TraceContextFormat"/>.
+ /// Gets or sets <see cref="ITextFormat"/> for context propagation. Default value: <see cref="CompositePropagator"/>.
/// </summary>
- public ITextFormat TextFormat { get; set; } = new TraceContextFormat();
+ public ITextFormat TextFormat { get; set; } = new CompositePropagator(new System.Collections.Generic.List<ITextFormat>
+ {
+ new TraceContextFormat(),
+ new BaggageFormat(),
+ });
/// <summary>
/// Gets or sets an optional callback method for filtering <see cref="HttpWebRequest"/> requests that are sent through the instrumentation. | 1 | // <copyright file="HttpWebRequestInstrumentationOptions.netfx.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
#if NETFRAMEWORK
using System;
using System.Net;
using OpenTelemetry.Context.Propagation;
using OpenTelemetry.Trace;
namespace OpenTelemetry.Instrumentation.Http
{
/// <summary>
/// Options for HttpWebRequest instrumentation.
/// </summary>
public class HttpWebRequestInstrumentationOptions
{
/// <summary>
/// Gets or sets a value indicating whether or not the HTTP version should be added as the <see cref="SemanticConventions.AttributeHttpFlavor"/> tag. Default value: False.
/// </summary>
public bool SetHttpFlavor { get; set; }
/// <summary>
/// Gets or sets <see cref="ITextFormat"/> for context propagation. Default value: <see cref="TraceContextFormat"/>.
/// </summary>
public ITextFormat TextFormat { get; set; } = new TraceContextFormat();
/// <summary>
/// Gets or sets an optional callback method for filtering <see cref="HttpWebRequest"/> requests that are sent through the instrumentation.
/// </summary>
public Func<HttpWebRequest, bool> FilterFunc { get; set; }
internal bool EventFilter(HttpWebRequest request)
{
Uri requestUri;
if (request.Method == "POST"
&& (requestUri = request.RequestUri) != null
&& HttpClientInstrumentationOptions.IsInternalUrl(requestUri))
{
return false;
}
return this.FilterFunc?.Invoke(request) ?? true;
}
}
}
#endif
| 1 | 16,073 | Any reason not to put `using System.Collections.Generic` (guess there shouldn't be naming conflicts)? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -0,0 +1,8 @@
+class TwitterPlayerCardsController < ApplicationController
+ layout false
+
+ def show
+ response.headers.delete("X-Frame-Options")
+ @video = Video.find(params[:video_id])
+ end
+end | 1 | 1 | 12,842 | Could the name of this controller just be shortened to `TwitterCardsController`? Is the `Player` there adding information? | thoughtbot-upcase | rb |
|
@@ -138,6 +138,9 @@ func (u *staticUpstream) NewHost(host string) (*UpstreamHost, error) {
if uh.Unhealthy {
return true
}
+ if uh.Dynamic {
+ return false
+ }
if uh.Fails >= u.MaxFails &&
u.MaxFails != 0 {
return true | 1 | package proxy
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/mholt/caddy/caddyfile"
"github.com/mholt/caddy/caddyhttp/httpserver"
)
var (
supportedPolicies = make(map[string]func() Policy)
)
type staticUpstream struct {
from string
upstreamHeaders http.Header
downstreamHeaders http.Header
Hosts HostPool
Policy Policy
insecureSkipVerify bool
FailTimeout time.Duration
MaxFails int32
MaxConns int64
HealthCheck struct {
Client http.Client
Path string
Interval time.Duration
Timeout time.Duration
}
WithoutPathPrefix string
IgnoredSubPaths []string
}
// NewStaticUpstreams parses the configuration input and sets up
// static upstreams for the proxy middleware.
func NewStaticUpstreams(c caddyfile.Dispenser) ([]Upstream, error) {
var upstreams []Upstream
for c.Next() {
upstream := &staticUpstream{
from: "",
upstreamHeaders: make(http.Header),
downstreamHeaders: make(http.Header),
Hosts: nil,
Policy: &Random{},
FailTimeout: 10 * time.Second,
MaxFails: 1,
MaxConns: 0,
}
if !c.Args(&upstream.from) {
return upstreams, c.ArgErr()
}
var to []string
for _, t := range c.RemainingArgs() {
parsed, err := parseUpstream(t)
if err != nil {
return upstreams, err
}
to = append(to, parsed...)
}
for c.NextBlock() {
switch c.Val() {
case "upstream":
if !c.NextArg() {
return upstreams, c.ArgErr()
}
parsed, err := parseUpstream(c.Val())
if err != nil {
return upstreams, err
}
to = append(to, parsed...)
default:
if err := parseBlock(&c, upstream); err != nil {
return upstreams, err
}
}
}
if len(to) == 0 {
return upstreams, c.ArgErr()
}
upstream.Hosts = make([]*UpstreamHost, len(to))
for i, host := range to {
uh, err := upstream.NewHost(host)
if err != nil {
return upstreams, err
}
upstream.Hosts[i] = uh
}
if upstream.HealthCheck.Path != "" {
upstream.HealthCheck.Client = http.Client{
Timeout: upstream.HealthCheck.Timeout,
}
go upstream.HealthCheckWorker(nil)
}
upstreams = append(upstreams, upstream)
}
return upstreams, nil
}
// RegisterPolicy adds a custom policy to the proxy.
func RegisterPolicy(name string, policy func() Policy) {
supportedPolicies[name] = policy
}
func (u *staticUpstream) From() string {
return u.from
}
func (u *staticUpstream) NewHost(host string) (*UpstreamHost, error) {
if !strings.HasPrefix(host, "http") &&
!strings.HasPrefix(host, "unix:") {
host = "http://" + host
}
uh := &UpstreamHost{
Name: host,
Conns: 0,
Fails: 0,
FailTimeout: u.FailTimeout,
Unhealthy: false,
UpstreamHeaders: u.upstreamHeaders,
DownstreamHeaders: u.downstreamHeaders,
CheckDown: func(u *staticUpstream) UpstreamHostDownFunc {
return func(uh *UpstreamHost) bool {
if uh.Unhealthy {
return true
}
if uh.Fails >= u.MaxFails &&
u.MaxFails != 0 {
return true
}
return false
}
}(u),
WithoutPathPrefix: u.WithoutPathPrefix,
MaxConns: u.MaxConns,
}
baseURL, err := url.Parse(uh.Name)
if err != nil {
return nil, err
}
uh.ReverseProxy = NewSingleHostReverseProxy(baseURL, uh.WithoutPathPrefix)
if u.insecureSkipVerify {
uh.ReverseProxy.Transport = InsecureTransport
}
return uh, nil
}
func parseUpstream(u string) ([]string, error) {
if !strings.HasPrefix(u, "unix:") {
colonIdx := strings.LastIndex(u, ":")
protoIdx := strings.Index(u, "://")
if colonIdx != -1 && colonIdx != protoIdx {
us := u[:colonIdx]
ue := ""
portsEnd := len(u)
if nextSlash := strings.Index(u[colonIdx:], "/"); nextSlash != -1 {
portsEnd = colonIdx + nextSlash
ue = u[portsEnd:]
}
ports := u[len(us)+1 : portsEnd]
if separators := strings.Count(ports, "-"); separators == 1 {
portsStr := strings.Split(ports, "-")
pIni, err := strconv.Atoi(portsStr[0])
if err != nil {
return nil, err
}
pEnd, err := strconv.Atoi(portsStr[1])
if err != nil {
return nil, err
}
if pEnd <= pIni {
return nil, fmt.Errorf("port range [%s] is invalid", ports)
}
hosts := []string{}
for p := pIni; p <= pEnd; p++ {
hosts = append(hosts, fmt.Sprintf("%s:%d%s", us, p, ue))
}
return hosts, nil
}
}
}
return []string{u}, nil
}
func parseBlock(c *caddyfile.Dispenser, u *staticUpstream) error {
switch c.Val() {
case "policy":
if !c.NextArg() {
return c.ArgErr()
}
policyCreateFunc, ok := supportedPolicies[c.Val()]
if !ok {
return c.ArgErr()
}
u.Policy = policyCreateFunc()
case "fail_timeout":
if !c.NextArg() {
return c.ArgErr()
}
dur, err := time.ParseDuration(c.Val())
if err != nil {
return err
}
u.FailTimeout = dur
case "max_fails":
if !c.NextArg() {
return c.ArgErr()
}
n, err := strconv.Atoi(c.Val())
if err != nil {
return err
}
u.MaxFails = int32(n)
case "max_conns":
if !c.NextArg() {
return c.ArgErr()
}
n, err := strconv.ParseInt(c.Val(), 10, 64)
if err != nil {
return err
}
u.MaxConns = n
case "health_check":
if !c.NextArg() {
return c.ArgErr()
}
u.HealthCheck.Path = c.Val()
// Set defaults
if u.HealthCheck.Interval == 0 {
u.HealthCheck.Interval = 30 * time.Second
}
if u.HealthCheck.Timeout == 0 {
u.HealthCheck.Timeout = 60 * time.Second
}
case "health_check_interval":
var interval string
if !c.Args(&interval) {
return c.ArgErr()
}
dur, err := time.ParseDuration(interval)
if err != nil {
return err
}
u.HealthCheck.Interval = dur
case "health_check_timeout":
var interval string
if !c.Args(&interval) {
return c.ArgErr()
}
dur, err := time.ParseDuration(interval)
if err != nil {
return err
}
u.HealthCheck.Timeout = dur
case "proxy_header": // TODO: deprecate this shortly after 0.9
fallthrough
case "header_upstream":
var header, value string
if !c.Args(&header, &value) {
return c.ArgErr()
}
u.upstreamHeaders.Add(header, value)
case "header_downstream":
var header, value string
if !c.Args(&header, &value) {
return c.ArgErr()
}
u.downstreamHeaders.Add(header, value)
case "transparent":
u.upstreamHeaders.Add("Host", "{host}")
u.upstreamHeaders.Add("X-Real-IP", "{remote}")
u.upstreamHeaders.Add("X-Forwarded-For", "{remote}")
u.upstreamHeaders.Add("X-Forwarded-Proto", "{scheme}")
case "websocket":
u.upstreamHeaders.Add("Connection", "{>Connection}")
u.upstreamHeaders.Add("Upgrade", "{>Upgrade}")
case "without":
if !c.NextArg() {
return c.ArgErr()
}
u.WithoutPathPrefix = c.Val()
case "except":
ignoredPaths := c.RemainingArgs()
if len(ignoredPaths) == 0 {
return c.ArgErr()
}
u.IgnoredSubPaths = ignoredPaths
case "insecure_skip_verify":
u.insecureSkipVerify = true
default:
return c.Errf("unknown property '%s'", c.Val())
}
return nil
}
func (u *staticUpstream) healthCheck() {
for _, host := range u.Hosts {
hostURL := host.Name + u.HealthCheck.Path
if r, err := u.HealthCheck.Client.Get(hostURL); err == nil {
io.Copy(ioutil.Discard, r.Body)
r.Body.Close()
host.Unhealthy = r.StatusCode < 200 || r.StatusCode >= 400
} else {
host.Unhealthy = true
}
}
}
func (u *staticUpstream) HealthCheckWorker(stop chan struct{}) {
ticker := time.NewTicker(u.HealthCheck.Interval)
u.healthCheck()
for {
select {
case <-ticker.C:
u.healthCheck()
case <-stop:
// TODO: the library should provide a stop channel and global
// waitgroup to allow goroutines started by plugins a chance
// to clean themselves up.
}
}
}
func (u *staticUpstream) Select(r *http.Request) *UpstreamHost {
pool := u.Hosts
if len(pool) == 1 {
if !pool[0].Available() {
return nil
}
return pool[0]
}
allUnavailable := true
for _, host := range pool {
if host.Available() {
allUnavailable = false
break
}
}
if allUnavailable {
return nil
}
if u.Policy == nil {
return (&Random{}).Select(pool, r)
}
return u.Policy.Select(pool, r)
}
func (u *staticUpstream) AllowedPath(requestPath string) bool {
for _, ignoredSubPath := range u.IgnoredSubPaths {
if httpserver.Path(path.Clean(requestPath)).Matches(path.Join(u.From(), ignoredSubPath)) {
return false
}
}
return true
}
| 1 | 8,757 | Backends with hostnames that depend on the request are always up? | caddyserver-caddy | go |
@@ -4,6 +4,8 @@ import functools
import colander
import venusian
import six
+from jsonpatch import JsonPatchException
+from jsonpointer import JsonPointerException
from pyramid import exceptions as pyramid_exceptions
from pyramid.decorator import reify
from pyramid.security import Everyone | 1 | import re
import functools
import colander
import venusian
import six
from pyramid import exceptions as pyramid_exceptions
from pyramid.decorator import reify
from pyramid.security import Everyone
from pyramid.httpexceptions import (HTTPNotModified, HTTPPreconditionFailed,
HTTPNotFound, HTTPServiceUnavailable)
from kinto.core import logger
from kinto.core import Service
from kinto.core.errors import http_error, raise_invalid, send_alert, ERRORS
from kinto.core.events import ACTIONS
from kinto.core.storage import exceptions as storage_exceptions, Filter, Sort
from kinto.core.utils import (
COMPARISON, classname, native_value, decode64, encode64, json,
encode_header, decode_header, dict_subset, recursive_update_dict
)
from .model import Model, ShareableModel
from .schema import ResourceSchema
from .viewset import ViewSet, ShareableViewSet
def register(depth=1, **kwargs):
"""Resource class decorator.
Register the decorated class in the cornice registry.
Pass all its keyword arguments to the register_resource
function.
"""
def wrapped(resource):
register_resource(resource, depth=depth + 1, **kwargs)
return resource
return wrapped
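# Illustrative usage of the ``register`` decorator above (example names only,
# not part of the original module):
#
#     @register()
#     class Mushroom(UserResource):
#         mapping = MushroomSchema()
#
# This registers the collection and record endpoints for that resource.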
def register_resource(resource_cls, settings=None, viewset=None, depth=1,
**kwargs):
"""Register a resource in the cornice registry.
:param resource_cls:
The resource class to register.
It should be a class or have a "name" attribute.
:param viewset:
A ViewSet object, which will be used to find out which arguments should
be appended to the views, and where the views are.
:param depth:
A depth offset. It will be used to determine what is the level of depth
in the call tree. (set to 1 by default.)
Any additional keyword parameters will be used to override the viewset
attributes.
"""
if viewset is None:
viewset = resource_cls.default_viewset(**kwargs)
else:
viewset.update(**kwargs)
resource_name = viewset.get_name(resource_cls)
def register_service(endpoint_type, settings):
"""Registers a service in cornice, for the given type.
"""
path_pattern = getattr(viewset, '%s_path' % endpoint_type)
path_values = {'resource_name': resource_name}
path = path_pattern.format(**path_values)
name = viewset.get_service_name(endpoint_type, resource_cls)
service = Service(name, path, depth=depth,
**viewset.get_service_arguments())
# Attach viewset and resource to the service for later reference.
service.viewset = viewset
service.resource = resource_cls
service.type = endpoint_type
# Attach collection and record paths.
service.collection_path = viewset.collection_path.format(**path_values)
service.record_path = (viewset.record_path.format(**path_values)
if viewset.record_path is not None else None)
methods = getattr(viewset, '%s_methods' % endpoint_type)
for method in methods:
if not viewset.is_endpoint_enabled(
endpoint_type, resource_name, method.lower(), settings):
continue
argument_getter = getattr(viewset, '%s_arguments' % endpoint_type)
view_args = argument_getter(resource_cls, method)
view = viewset.get_view(endpoint_type, method.lower())
service.add_view(method, view, klass=resource_cls, **view_args)
return service
def callback(context, name, ob):
# get the callbacks registered by the inner services
# and call them from here when the @resource classes are being
# scanned by venusian.
config = context.config.with_package(info.module)
# Storage is mandatory for resources.
if not hasattr(config.registry, 'storage'):
msg = 'Mandatory storage backend is missing from configuration.'
raise pyramid_exceptions.ConfigurationError(msg)
# A service for the list.
service = register_service('collection', config.registry.settings)
config.add_cornice_service(service)
# An optional one for record endpoint.
if getattr(viewset, 'record_path') is not None:
service = register_service('record', config.registry.settings)
config.add_cornice_service(service)
info = venusian.attach(resource_cls, callback, category='pyramid', depth=depth)
return callback
class UserResource(object):
"""Base resource class providing every endpoint."""
default_viewset = ViewSet
"""Default :class:`kinto.core.resource.viewset.ViewSet` class to use when
the resource is registered."""
default_model = Model
"""Default :class:`kinto.core.resource.model.Model` class to use for
interacting the :mod:`kinto.core.storage` and :mod:`kinto.core.permission`
backends."""
mapping = ResourceSchema()
"""Schema to validate records."""
def __init__(self, request, context=None):
# Models are isolated by user.
parent_id = self.get_parent_id(request)
# Authentication to storage is transmitted as is (cf. cloud_storage).
auth = request.headers.get('Authorization')
# ID generator by resource name in settings.
default_id_generator = request.registry.id_generators['']
resource_name = context.resource_name if context else ''
id_generator = request.registry.id_generators.get(resource_name,
default_id_generator)
self.model = self.default_model(
storage=request.registry.storage,
id_generator=id_generator,
collection_id=classname(self),
parent_id=parent_id,
auth=auth)
self.request = request
self.context = context
self.record_id = self.request.matchdict.get('id')
self.force_patch_update = False
# Log resource context.
logger.bind(collection_id=self.model.collection_id,
collection_timestamp=self.timestamp)
@reify
def timestamp(self):
"""Return the current collection timestamp.
:rtype: int
"""
try:
return self.model.timestamp()
except storage_exceptions.BackendError as e:
is_readonly = self.request.registry.settings['readonly']
if not is_readonly:
raise e
# If the instance is configured to be readonly, and if the
# collection is empty, the backend will try to bump the timestamp.
# It fails if the configured db user does not have write privileges.
logger.exception(e)
error_msg = ("Collection timestamp cannot be written. "
"Records endpoint must be hit at least once from a "
"writable instance.")
raise http_error(HTTPServiceUnavailable(),
errno=ERRORS.BACKEND,
message=error_msg)
def get_parent_id(self, request):
"""Return the parent_id of the resource with regards to the current
request.
:param request:
The request used to create the resource.
:rtype: str
"""
return request.prefixed_userid
def _get_known_fields(self):
"""Return all the fields defined in the resource mapping."""
known_fields = [c.name for c in self.mapping.children] + \
[self.model.id_field,
self.model.modified_field,
self.model.deleted_field]
return known_fields
def is_known_field(self, field):
"""Return ``True`` if `field` is defined in the resource schema.
If the resource schema allows unknown fields, this will always return
``True``.
:param str field: Field name
:rtype: bool
"""
if self.mapping.get_option('preserve_unknown'):
return True
known_fields = self._get_known_fields()
# Test first level only: ``target.data.id`` -> ``target``
field = field.split('.', 1)[0]
return field in known_fields
#
# End-points
#
def collection_get(self):
"""Model ``GET`` endpoint: retrieve multiple records.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified` if
``If-None-Match`` header is provided and collection not
modified in the interim.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and collection modified
in the interim.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
if filters or sorting are invalid.
"""
self._add_timestamp_header(self.request.response)
self._add_cache_header(self.request.response)
self._raise_304_if_not_modified()
self._raise_412_if_modified()
headers = self.request.response.headers
filters = self._extract_filters()
limit = self._extract_limit()
sorting = self._extract_sorting(limit)
partial_fields = self._extract_partial_fields()
filter_fields = [f.field for f in filters]
include_deleted = self.model.modified_field in filter_fields
pagination_rules, offset = self._extract_pagination_rules_from_token(
limit, sorting)
records, total_records = self.model.get_records(
filters=filters,
sorting=sorting,
limit=limit,
pagination_rules=pagination_rules,
include_deleted=include_deleted)
offset = offset + len(records)
next_page = None
if limit and len(records) == limit and offset < total_records:
lastrecord = records[-1]
next_page = self._next_page_url(sorting, limit, lastrecord, offset)
headers['Next-Page'] = encode_header(next_page)
if partial_fields:
records = [
dict_subset(record, partial_fields)
for record in records
]
# Bind metric about response size.
logger.bind(nb_records=len(records), limit=limit)
headers['Total-Records'] = encode_header('%s' % total_records)
return self.postprocess(records)
def collection_post(self):
"""Model ``POST`` endpoint: create a record.
If the new record id conflicts with an existing one, the
posted record is ignored, and the existing record is returned, with
a ``200`` status.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and collection modified
in the interim.
.. seealso::
Add custom behaviour by overriding
:meth:`kinto.core.resource.UserResource.process_record`
"""
new_record = self.request.validated.get('data', {})
try:
# Since ``id`` does not belong to schema, it is not in validated
# data. Must look up in body.
id_field = self.model.id_field
new_record[id_field] = _id = self.request.json['data'][id_field]
self._raise_400_if_invalid_id(_id)
existing = self._get_record_or_404(_id)
except (HTTPNotFound, KeyError, ValueError):
existing = None
self._raise_412_if_modified(record=existing)
if existing:
record = existing
action = ACTIONS.READ
else:
new_record = self.process_record(new_record)
record = self.model.create_record(new_record)
self.request.response.status_code = 201
action = ACTIONS.CREATE
return self.postprocess(record, action=action)
def collection_delete(self):
"""Model ``DELETE`` endpoint: delete multiple records.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and collection modified
in the interim.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
if filters are invalid.
"""
self._raise_412_if_modified()
filters = self._extract_filters()
records, _ = self.model.get_records(filters=filters)
deleted = self.model.delete_records(filters=filters)
action = len(deleted) > 0 and ACTIONS.DELETE or ACTIONS.READ
return self.postprocess(deleted, action=action, old=records)
def get(self):
"""Record ``GET`` endpoint: retrieve a record.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified` if
``If-None-Match`` header is provided and record not
modified in the interim.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
in the interim.
"""
self._raise_400_if_invalid_id(self.record_id)
record = self._get_record_or_404(self.record_id)
timestamp = record[self.model.modified_field]
self._add_timestamp_header(self.request.response, timestamp=timestamp)
self._add_cache_header(self.request.response)
self._raise_304_if_not_modified(record)
self._raise_412_if_modified(record)
partial_fields = self._extract_partial_fields()
if partial_fields:
record = dict_subset(record, partial_fields)
return self.postprocess(record)
def put(self):
"""Record ``PUT`` endpoint: create or replace the provided record and
return it.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
in the interim.
.. note::
If ``If-None-Match: *`` request header is provided, the
``PUT`` will succeed only if no record exists with this id.
.. seealso::
Add custom behaviour by overriding
:meth:`kinto.core.resource.UserResource.process_record`.
"""
self._raise_400_if_invalid_id(self.record_id)
id_field = self.model.id_field
existing = None
tombstones = None
try:
existing = self._get_record_or_404(self.record_id)
except HTTPNotFound:
# Look if this record used to exist (for preconditions check).
filter_by_id = Filter(id_field, self.record_id, COMPARISON.EQ)
tombstones, _ = self.model.get_records(filters=[filter_by_id],
include_deleted=True)
if len(tombstones) > 0:
existing = tombstones[0]
finally:
if existing:
self._raise_412_if_modified(existing)
# If `data` is not provided, use existing record (or empty if creation)
post_record = self.request.validated.get('data', existing) or {}
record_id = post_record.setdefault(id_field, self.record_id)
self._raise_400_if_id_mismatch(record_id, self.record_id)
new_record = self.process_record(post_record, old=existing)
if existing and not tombstones:
record = self.model.update_record(new_record)
else:
record = self.model.create_record(new_record)
self.request.response.status_code = 201
timestamp = record[self.model.modified_field]
self._add_timestamp_header(self.request.response, timestamp=timestamp)
action = existing and ACTIONS.UPDATE or ACTIONS.CREATE
return self.postprocess(record, action=action, old=existing)
def patch(self):
"""Record ``PATCH`` endpoint: modify a record and return its
new version.
If a request header ``Response-Behavior`` is set to ``light``,
only the fields whose value was changed are returned.
If set to ``diff``, only the fields whose value became different than
the one provided are returned.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
in the interim.
.. seealso::
Add custom behaviour by overriding
:meth:`kinto.core.resource.UserResource.apply_changes` or
:meth:`kinto.core.resource.UserResource.process_record`.
"""
self._raise_400_if_invalid_id(self.record_id)
existing = self._get_record_or_404(self.record_id)
self._raise_412_if_modified(existing)
try:
# `data` attribute may not be present if only perms are patched.
changes = self.request.json.get('data', {})
except ValueError:
# If no `data` nor `permissions` is provided in patch, reject!
# XXX: This should happen in schema instead (c.f. ShareableViewSet)
error_details = {
'name': 'data',
'description': 'Provide at least one of data or permissions',
}
raise_invalid(self.request, **error_details)
updated = self.apply_changes(existing, changes=changes)
record_id = updated.setdefault(self.model.id_field,
self.record_id)
self._raise_400_if_id_mismatch(record_id, self.record_id)
new_record = self.process_record(updated, old=existing)
changed_fields = [k for k in changes.keys()
if existing.get(k) != new_record.get(k)]
# Save in storage if necessary.
if changed_fields or self.force_patch_update:
new_record = self.model.update_record(new_record)
else:
# Behave as if storage would have added `id` and `last_modified`.
for extra_field in [self.model.modified_field,
self.model.id_field]:
new_record[extra_field] = existing[extra_field]
# Adjust response according to ``Response-Behavior`` header
body_behavior = self.request.headers.get('Response-Behavior', 'full')
if body_behavior.lower() == 'light':
# Only fields that were changed.
data = {k: new_record[k] for k in changed_fields}
elif body_behavior.lower() == 'diff':
# Only fields that are different from those provided.
data = {k: new_record[k] for k in changed_fields
if changes.get(k) != new_record.get(k)}
else:
data = new_record
timestamp = new_record.get(self.model.modified_field,
existing[self.model.modified_field])
self._add_timestamp_header(self.request.response, timestamp=timestamp)
return self.postprocess(data, action=ACTIONS.UPDATE, old=existing)
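# Note on the ``Response-Behavior`` header handled in ``patch()`` above
# (illustrative): with ``light`` the response ``data`` contains only the
# fields whose value actually changed, with ``diff`` only the changed fields
# whose new value also differs from the one submitted by the client, and the
# default (``full``) returns the whole record.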
def delete(self):
"""Record ``DELETE`` endpoint: delete a record and return it.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
in the interim.
"""
self._raise_400_if_invalid_id(self.record_id)
record = self._get_record_or_404(self.record_id)
self._raise_412_if_modified(record)
# Retrieve the last_modified information from the querystring if present.
last_modified = self.request.GET.get('last_modified')
if last_modified:
last_modified = native_value(last_modified.strip('"'))
if not isinstance(last_modified, six.integer_types):
error_details = {
'name': 'last_modified',
'location': 'querystring',
'description': 'Invalid value for %s' % last_modified
}
raise_invalid(self.request, **error_details)
# If less than or equal to the current record's timestamp, ignore it.
if last_modified <= record[self.model.modified_field]:
last_modified = None
deleted = self.model.delete_record(record, last_modified=last_modified)
return self.postprocess(deleted, action=ACTIONS.DELETE, old=record)
#
# Data processing
#
def process_record(self, new, old=None):
"""Hook for processing records before they reach storage, to introduce
specific logics on fields for example.
.. code-block:: python
def process_record(self, new, old=None):
new = super(MyResource, self).process_record(new, old)
version = old['version'] if old else 0
new['version'] = version + 1
return new
Or add extra validation based on request:
.. code-block:: python
from kinto.core.errors import raise_invalid
def process_record(self, new, old=None):
new = super(MyResource, self).process_record(new, old)
if new['browser'] not in request.headers['User-Agent']:
raise_invalid(self.request, name='browser', error='Wrong')
return new
:param dict new: the validated record to be created or updated.
:param dict old: the old record to be updated,
``None`` for creation endpoints.
:returns: the processed record.
:rtype: dict
"""
modified_field = self.model.modified_field
new_last_modified = new.get(modified_field)
# Drop the new last_modified if it is not an integer.
is_integer = isinstance(new_last_modified, int)
if not is_integer:
new.pop(modified_field, None)
return new
# Drop the new last_modified if lesser or equal to the old one.
is_less_or_equal = (old is not None and
new_last_modified <= old[modified_field])
if is_less_or_equal:
new.pop(modified_field, None)
return new
def apply_changes(self, record, changes):
"""Merge `changes` into `record` fields.
.. note::
This is used in the context of PATCH only.
Override this to control field changes at record level, for example:
.. code-block:: python
def apply_changes(self, record, changes):
# Ignore value change if inferior
if record['position'] > changes.get('position', -1):
changes.pop('position', None)
return super(MyResource, self).apply_changes(record, changes)
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
if result does not comply with resource schema.
:returns: the new record with `changes` applied.
:rtype: dict
"""
for field, value in changes.items():
has_changed = record.get(field, value) != value
if self.mapping.is_readonly(field) and has_changed:
error_details = {
'name': field,
'description': 'Cannot modify {0}'.format(field)
}
raise_invalid(self.request, **error_details)
updated = record.copy()
# recursive patch and remove field if null attribute is passed (RFC 7396)
content_type = str(self.request.headers.get('Content-Type'))
if content_type == 'application/merge-patch+json':
recursive_update_dict(updated, changes, ignores=[None])
else:
updated.update(**changes)
try:
return self.mapping.deserialize(updated)
except colander.Invalid as e:
# Transform the errors we got from colander into Cornice errors.
# We could not rely on Service schema because the record should be
# validated only once the changes are applied
for field, error in e.asdict().items():
raise_invalid(self.request, name=field, description=error)
def postprocess(self, result, action=ACTIONS.READ, old=None):
body = {
'data': result
}
parent_id = self.get_parent_id(self.request)
self.request.notify_resource_event(parent_id=parent_id,
timestamp=self.timestamp,
data=result,
action=action,
old=old)
return body
#
# Internals
#
def _get_record_or_404(self, record_id):
"""Retrieve record from storage and raise ``404 Not found`` if missing.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
"""
if self.context and self.context.current_record:
# Set during authorization. Save a storage hit.
return self.context.current_record
try:
return self.model.get_record(record_id)
except storage_exceptions.RecordNotFoundError:
response = http_error(HTTPNotFound(),
errno=ERRORS.INVALID_RESOURCE_ID)
raise response
def _add_timestamp_header(self, response, timestamp=None):
"""Add current timestamp in response headers, when request comes in.
"""
if timestamp is None:
timestamp = self.timestamp
# Pyramid takes care of converting.
response.last_modified = timestamp / 1000.0
# Return timestamp as ETag.
response.headers['ETag'] = encode_header('"%s"' % timestamp)
def _add_cache_header(self, response):
"""Add Cache-Control and Expire headers, based on a setting for the
current resource.
Cache headers will be set with anonymous requests only.
.. note::
The ``Cache-Control: no-cache`` response header does not prevent
caching in client. It will indicate the client to revalidate
the response content on each access. The client will send a
conditional request to the server and check that a
``304 Not modified`` is returned before serving content from cache.
"""
resource_name = self.context.resource_name if self.context else ''
setting_key = '%s_cache_expires_seconds' % resource_name
collection_expires = self.request.registry.settings.get(setting_key)
is_anonymous = self.request.prefixed_userid is None
if collection_expires and is_anonymous:
response.cache_expires(seconds=int(collection_expires))
else:
# Since the `Expires` response header provides an HTTP date with a
# resolution in seconds, do not use Pyramid `cache_expires()` in
# order to omit it.
response.cache_control.no_cache = True
response.cache_control.no_store = True
def _raise_400_if_invalid_id(self, record_id):
"""Raise 400 if the specified record id does not match the format expected
by storage backends.
:raises: :class:`pyramid.httpexceptions.HTTPBadRequest`
"""
is_string = isinstance(record_id, six.string_types)
if not is_string or not self.model.id_generator.match(record_id):
error_details = {
'location': 'path',
'description': "Invalid record id"
}
raise_invalid(self.request, **error_details)
def _raise_304_if_not_modified(self, record=None):
"""Raise 304 if the current timestamp is not greater than the one specified
in headers.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified`
"""
if_none_match = self.request.headers.get('If-None-Match')
if not if_none_match:
return
if_none_match = decode_header(if_none_match)
try:
if not (if_none_match[0] == if_none_match[-1] == '"'):
raise ValueError()
modified_since = int(if_none_match[1:-1])
except (IndexError, ValueError):
if if_none_match == '*':
return
error_details = {
'location': 'headers',
'description': "Invalid value for If-None-Match"
}
raise_invalid(self.request, **error_details)
if record:
current_timestamp = record[self.model.modified_field]
else:
current_timestamp = self.model.timestamp()
if current_timestamp <= modified_since:
response = HTTPNotModified()
self._add_timestamp_header(response, timestamp=current_timestamp)
raise response
def _raise_412_if_modified(self, record=None):
"""Raise 412 if the current timestamp is greater than the one
specified in headers.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed`
"""
if_match = self.request.headers.get('If-Match')
if_none_match = self.request.headers.get('If-None-Match')
if not if_match and not if_none_match:
return
if_match = decode_header(if_match) if if_match else None
if record and if_none_match and decode_header(if_none_match) == '*':
if record.get(self.model.deleted_field, False):
# Tombstones should not prevent creation.
return
modified_since = -1 # Always raise.
elif if_match:
try:
if not (if_match[0] == if_match[-1] == '"'):
raise ValueError()
modified_since = int(if_match[1:-1])
except (IndexError, ValueError):
message = ("Invalid value for If-Match. The value should "
"be integer between double quotes.")
error_details = {
'location': 'headers',
'description': message
}
raise_invalid(self.request, **error_details)
else:
# In case _raise_304_if_not_modified() did not raise.
return
if record:
current_timestamp = record[self.model.modified_field]
else:
current_timestamp = self.model.timestamp()
if current_timestamp > modified_since:
error_msg = 'Resource was modified meanwhile'
details = {'existing': record} if record else {}
response = http_error(HTTPPreconditionFailed(),
errno=ERRORS.MODIFIED_MEANWHILE,
message=error_msg,
details=details)
self._add_timestamp_header(response, timestamp=current_timestamp)
raise response
def _raise_400_if_id_mismatch(self, new_id, record_id):
"""Raise 400 if the `new_id`, within the request body, does not match
the `record_id`, obtained from request path.
:raises: :class:`pyramid.httpexceptions.HTTPBadRequest`
"""
if new_id != record_id:
error_msg = 'Record id does not match existing record'
error_details = {
'name': self.model.id_field,
'description': error_msg
}
raise_invalid(self.request, **error_details)
def _extract_partial_fields(self):
"""Extract the fields to do the projection from QueryString parameters.
"""
fields = self.request.GET.get('_fields', None)
if fields:
fields = fields.split(',')
root_fields = [f.split('.')[0] for f in fields]
known_fields = self._get_known_fields()
invalid_fields = set(root_fields) - set(known_fields)
preserve_unknown = self.mapping.get_option('preserve_unknown')
if not preserve_unknown and invalid_fields:
error_msg = "Fields %s do not exist" % ','.join(invalid_fields)
error_details = {
'name': "Invalid _fields parameter",
'description': error_msg
}
raise_invalid(self.request, **error_details)
# Since id and last_modified are part of the synchronisation
# API, force their presence in payloads.
fields = fields + [self.model.id_field, self.model.modified_field]
return fields
def _extract_limit(self):
"""Extract limit value from QueryString parameters."""
paginate_by = self.request.registry.settings['paginate_by']
limit = self.request.GET.get('_limit', paginate_by)
if limit:
try:
limit = int(limit)
except ValueError:
error_details = {
'location': 'querystring',
'description': "_limit should be an integer"
}
raise_invalid(self.request, **error_details)
# If limit is higher than the paginate_by setting, cap it to that setting.
if limit and paginate_by:
limit = min(limit, paginate_by)
return limit
def _extract_filters(self, queryparams=None):
"""Extracts filters from QueryString parameters."""
if not queryparams:
queryparams = self.request.GET
filters = []
for param, paramvalue in queryparams.items():
param = param.strip()
error_details = {
'name': param,
'location': 'querystring',
'description': 'Invalid value for %s' % param
}
# Ignore specific fields
if param.startswith('_') and param not in ('_since',
'_to',
'_before'):
continue
# Handle the _since specific filter.
if param in ('_since', '_to', '_before'):
value = native_value(paramvalue.strip('"'))
if not isinstance(value, six.integer_types):
raise_invalid(self.request, **error_details)
if param == '_since':
operator = COMPARISON.GT
else:
if param == '_to':
message = ('_to is now deprecated, '
'you should use _before instead')
url = ('https://kinto.readthedocs.io/en/2.4.0/api/'
'resource.html#list-of-available-url-'
'parameters')
send_alert(self.request, message, url)
operator = COMPARISON.LT
filters.append(
Filter(self.model.modified_field, value, operator)
)
continue
allKeywords = '|'.join([i.name.lower() for i in COMPARISON])
m = re.match(r'^('+allKeywords+')_([\w\.]+)$', param)
if m:
keyword, field = m.groups()
operator = getattr(COMPARISON, keyword.upper())
else:
operator, field = COMPARISON.EQ, param
if not self.is_known_field(field):
error_msg = "Unknown filter field '{0}'".format(param)
error_details['description'] = error_msg
raise_invalid(self.request, **error_details)
value = native_value(paramvalue)
if operator in (COMPARISON.IN, COMPARISON.EXCLUDE):
value = set([native_value(v) for v in paramvalue.split(',')])
all_integers = all([isinstance(v, six.integer_types)
for v in value])
all_strings = all([isinstance(v, six.text_type)
for v in value])
has_invalid_value = (
(field == self.model.id_field and not all_strings) or
(field == self.model.modified_field and not all_integers)
)
if has_invalid_value:
raise_invalid(self.request, **error_details)
filters.append(Filter(field, value, operator))
return filters
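# Illustrative mapping for ``_extract_filters`` above: a bare parameter such
# as ``?unread=true`` becomes an EQ filter on the ``unread`` field, while a
# prefixed one such as ``?gt_last_modified=123`` becomes
# Filter('last_modified', 123, COMPARISON.GT).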
def _extract_sorting(self, limit):
"""Extracts sorting from QueryString parameters."""
specified = self.request.GET.get('_sort', '').split(',')
sorting = []
modified_field_used = self.model.modified_field in specified
for field in specified:
field = field.strip()
m = re.match(r'^([\-+]?)([\w\.]+)$', field)
if m:
order, field = m.groups()
if not self.is_known_field(field):
error_details = {
'location': 'querystring',
'description': "Unknown sort field '{0}'".format(field)
}
raise_invalid(self.request, **error_details)
direction = -1 if order == '-' else 1
sorting.append(Sort(field, direction))
if not modified_field_used:
# Add a sort by the ``modified_field`` in descending order
# useful for pagination
sorting.append(Sort(self.model.modified_field, -1))
return sorting
def _build_pagination_rules(self, sorting, last_record, rules=None):
"""Return the list of rules for a given sorting attribute and
last_record.
"""
if rules is None:
rules = []
rule = []
next_sorting = sorting[:-1]
for field, _ in next_sorting:
rule.append(Filter(field, last_record.get(field), COMPARISON.EQ))
field, direction = sorting[-1]
if direction == -1:
rule.append(Filter(field, last_record.get(field), COMPARISON.LT))
else:
rule.append(Filter(field, last_record.get(field), COMPARISON.GT))
rules.append(rule)
if len(next_sorting) == 0:
return rules
return self._build_pagination_rules(next_sorting, last_record, rules)
def _extract_pagination_rules_from_token(self, limit, sorting):
"""Get pagination params."""
queryparams = self.request.GET
token = queryparams.get('_token', None)
filters = []
offset = 0
if token:
try:
tokeninfo = json.loads(decode64(token))
if not isinstance(tokeninfo, dict):
raise ValueError()
last_record = tokeninfo['last_record']
offset = tokeninfo['offset']
except (ValueError, KeyError, TypeError):
error_msg = '_token has invalid content'
error_details = {
'location': 'querystring',
'description': error_msg
}
raise_invalid(self.request, **error_details)
filters = self._build_pagination_rules(sorting, last_record)
return filters, offset
def _next_page_url(self, sorting, limit, last_record, offset):
"""Build the Next-Page header from where we stopped."""
token = self._build_pagination_token(sorting, last_record, offset)
params = self.request.GET.copy()
params['_limit'] = limit
params['_token'] = token
service = self.request.current_service
next_page_url = self.request.route_url(service.name, _query=params,
**self.request.matchdict)
return next_page_url
def _build_pagination_token(self, sorting, last_record, offset):
"""Build a pagination token.
It is a base64 JSON object with the sorting fields values of
the last_record.
"""
token = {
'last_record': {},
'offset': offset
}
for field, _ in sorting:
token['last_record'][field] = last_record[field]
return encode64(json.dumps(token))
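# Illustrative example for ``_build_pagination_token`` above: when sorting on
# ``last_modified`` with an offset of 10, the token is the base64 encoding of
# ``{"last_record": {"last_modified": 1234}, "offset": 10}``.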
class ShareableResource(UserResource):
"""Shareable resources allow setting permissions on records, in order to
share their access or protect their modification.
"""
default_model = ShareableModel
default_viewset = ShareableViewSet
permissions = ('read', 'write')
"""List of allowed permissions names."""
def __init__(self, *args, **kwargs):
super(ShareableResource, self).__init__(*args, **kwargs)
# In base resource, PATCH only hit storage if no data has changed.
# Here, we force update because we add the current principal to
# the ``write`` ACE.
self.force_patch_update = True
# Required by the ShareableModel class.
self.model.permission = self.request.registry.permission
if self.request.prefixed_userid is None:
# The principal of an anonymous user is system.Everyone
self.model.current_principal = Everyone
else:
self.model.current_principal = self.request.prefixed_userid
self.model.effective_principals = self.request.effective_principals
if self.context:
self.model.get_permission_object_id = functools.partial(
self.context.get_permission_object_id,
self.request)
def get_parent_id(self, request):
"""Unlike :class:`kinto.core.resource.UserResource`, records are not
isolated by user.
See https://github.com/mozilla-services/cliquet/issues/549
:returns: A constant empty value.
"""
return ''
def _extract_filters(self, queryparams=None):
"""Override the default filters extraction from the QueryString, to restrict
results to the records shared with the current user.
XXX: find a more elegant approach to add custom filters.
"""
filters = super(ShareableResource, self)._extract_filters(queryparams)
ids = self.context.shared_ids
if ids is not None:
filter_by_id = Filter(self.model.id_field, ids, COMPARISON.IN)
filters.insert(0, filter_by_id)
return filters
def _raise_412_if_modified(self, record=None):
"""Do not provide the permissions among the record fields.
Ref: https://github.com/Kinto/kinto/issues/224
"""
if record:
record = record.copy()
record.pop(self.model.permissions_field, None)
return super(ShareableResource, self)._raise_412_if_modified(record)
def process_record(self, new, old=None):
"""Read permissions from request body, and in the case of ``PUT`` every
existing ACE is removed (using empty list).
"""
new = super(ShareableResource, self).process_record(new, old)
permissions = self.request.validated.get('permissions', {})
annotated = new.copy()
if permissions:
is_put = (self.request.method.lower() == 'put')
if is_put:
# Remove every existing ACEs using empty lists.
for perm in self.permissions:
permissions.setdefault(perm, [])
annotated[self.model.permissions_field] = permissions
return annotated
def postprocess(self, result, action=ACTIONS.READ, old=None):
"""Add ``permissions`` attribute in response body.
In the HTTP API, it was decided that ``permissions`` would reside
outside the ``data`` attribute.
"""
body = {}
if not isinstance(result, list):
# record endpoint.
perms = result.pop(self.model.permissions_field, None)
if perms is not None:
body['permissions'] = {k: list(p) for k, p in perms.items()}
if old:
# Remove permissions from event payload.
old.pop(self.model.permissions_field, None)
data = super(ShareableResource, self).postprocess(result, action, old)
body.update(data)
return body
| 1 | 10,054 | I'd rather catch those to `utils.py` and raise a simple ValueError from them. From the resource point of view, these are details of implementation | Kinto-kinto | py |
@@ -11,10 +11,6 @@ module Travis
module Build
class Script
class Julia < Script
- DEFAULTS = {
- julia: 'release',
- }
-
def export
super
| 1 | # vim:set ts=2 sw=2 sts=2 autoindent:
# Community maintainers:
#
# Tony Kelman <tony kelman net, @tkelman>
# Pontus Stenetorp <pontus stenetorp se, @ninjin>
# Elliot Saba <staticfloat gmail com, @staticfloat>
# Simon Byrne <simonbyrne gmail.com, @simonbyrne>
#
module Travis
module Build
class Script
class Julia < Script
DEFAULTS = {
julia: 'release',
}
def export
super
sh.export 'TRAVIS_JULIA_VERSION', config[:julia].to_s.shellescape,
echo: false
sh.export 'JULIA_PROJECT', "@."
end
def setup
super
sh.echo 'Julia for Travis-CI is not officially supported, ' \
'but is community maintained.', ansi: :green
sh.echo 'Please file any issues using the following link',
ansi: :green
sh.echo ' https://github.com/travis-ci/travis-ci/issues' \
'/new?labels=julia', ansi: :green
sh.echo 'and mention \`@travis-ci/julia-maintainers\`' \
' in the issue', ansi: :green
sh.fold 'Julia-install' do
sh.echo 'Installing Julia', ansi: :yellow
sh.cmd 'CURL_USER_AGENT="Travis-CI $(curl --version | head -n 1)"'
case config[:os]
when 'linux'
sh.cmd 'mkdir -p ~/julia'
sh.cmd %Q{curl -A "$CURL_USER_AGENT" -s -L --retry 7 '#{julia_url}' } \
'| tar -C ~/julia -x -z --strip-components=1 -f -'
when 'osx'
sh.cmd %Q{curl -A "$CURL_USER_AGENT" -s -L --retry 7 -o julia.dmg '#{julia_url}'}
sh.cmd 'mkdir juliamnt'
sh.cmd 'hdiutil mount -readonly -mountpoint juliamnt julia.dmg'
sh.cmd 'cp -a juliamnt/*.app/Contents/Resources/julia ~/'
else
sh.failure "Operating system not supported: #{config[:os]}"
end
sh.cmd 'export PATH="${PATH}:${HOME}/julia/bin"'
end
end
def announce
super
sh.cmd 'julia --color=yes -e "VERSION >= v\"0.7.0-DEV.3630\" && using InteractiveUtils; versioninfo()"'
sh.echo ''
end
def script
sh.echo 'Executing the default test script', ansi: :green
# Extract the package name from the repository slug (org/pkgname.jl)
m = /(\w+?)\/(\w+?)(?:\.jl)?$/.match(data[:repository][:slug])
if m != nil
sh.export 'JL_PKG', m[2]
end
sh.echo 'Package name determined from repository url to be ${JL_PKG}',
ansi: :green
# Check if the repository is using new Pkg
sh.if "-f Project.toml || -f JuliaProject.toml" do
sh.if '-a .git/shallow' do
sh.cmd 'git fetch --unshallow'
end
# build
sh.cmd 'julia --color=yes -e "if VERSION < v\"0.7.0-DEV.5183\"; Pkg.clone(pwd()); Pkg.build(\"${JL_PKG}\"); else using Pkg; Pkg.build(); end"'
# run tests
sh.cmd 'julia --check-bounds=yes --color=yes -e "if VERSION < v\"0.7.0-DEV.5183\"; Pkg.test(\"${JL_PKG}\", coverage=true); else using Pkg; Pkg.test(coverage=true); end"'
end
sh.else do
sh.if '-a .git/shallow' do
sh.cmd 'git fetch --unshallow'
end
# build
sh.cmd 'julia --color=yes -e "VERSION >= v\"0.7.0-DEV.5183\" && using Pkg; Pkg.clone(pwd()); Pkg.build(\"${JL_PKG}\")"'
# run tests
sh.cmd 'julia --check-bounds=yes --color=yes -e "VERSION >= v\"0.7.0-DEV.5183\" && using Pkg; Pkg.test(\"${JL_PKG}\", coverage=true)"'
end
end
private
def julia_url
case config[:os]
when 'linux'
osarch = 'linux/x64'
ext = 'linux-x86_64.tar.gz'
nightlyext = 'linux64.tar.gz'
when 'osx'
osarch = 'mac/x64'
ext = 'mac64.dmg'
nightlyext = ext
end
case julia_version = Array(config[:julia]).first.to_s
when 'release'
# CHANGEME on new minor releases (once or twice a year)
url = "julialang-s3.julialang.org/bin/#{osarch}/0.6/julia-0.6-latest-#{ext}"
when 'nightly'
url = "julialangnightlies-s3.julialang.org/bin/#{osarch}/julia-latest-#{nightlyext}"
when /^(\d+\.\d+)\.\d+$/
url = "julialang-s3.julialang.org/bin/#{osarch}/#{$1}/julia-#{julia_version}-#{ext}"
when /^(\d+\.\d+)$/
url = "julialang-s3.julialang.org/bin/#{osarch}/#{$1}/julia-#{$1}-latest-#{ext}"
else
sh.failure "Unknown Julia version: #{julia_version}"
end
"https://#{url}"
end
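# Illustrative example for julia_url above: with config[:julia] == '0.6.2' and
# config[:os] == 'linux', the returned URL is
# "https://julialang-s3.julialang.org/bin/linux/x64/0.6/julia-0.6.2-linux-x86_64.tar.gz".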
end
end
end
end
| 1 | 16,284 | make this 1.0 ? I don't think all that many people do `language: julia` without any `julia:` specifiers, but may as well keep that possible? | travis-ci-travis-build | rb |
@@ -20,7 +20,10 @@
//
// For secrets.OpenKeeper, azurekeyvault registers for the scheme "azurekeyvault".
// The default URL opener will use Dial, which gets default credentials from the
-// environment.
+// environment, unless the AZURE_KEYVAULT_AUTH_VIA_CLI environment variable is
+// set to true, in which case it uses DialUsingCLIAuth to get credentials from the
+// "az" command line.
+//
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information. | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package azurekeyvault provides a secrets implementation backed by Azure KeyVault.
// See https://docs.microsoft.com/en-us/azure/key-vault/key-vault-whatis for more information.
// Use OpenKeeper to construct a *secrets.Keeper.
//
// URLs
//
// For secrets.OpenKeeper, azurekeyvault registers for the scheme "azurekeyvault".
// The default URL opener will use Dial, which gets default credentials from the
// environment.
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information.
//
// As
//
// azurekeyvault exposes the following type for As:
// - Error: autorest.DetailedError, see https://godoc.org/github.com/Azure/go-autorest/autorest#DetailedError
package azurekeyvault
import (
"context"
"encoding/base64"
"fmt"
"net/url"
"path"
"regexp"
"strings"
"sync"
"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/google/wire"
"gocloud.dev/gcerrors"
"gocloud.dev/internal/gcerr"
"gocloud.dev/internal/useragent"
"gocloud.dev/secrets"
)
var (
// Map of HTTP Status Code to go-cloud ErrorCode
errorCodeMap = map[int]gcerrors.ErrorCode{
200: gcerrors.OK,
400: gcerrors.InvalidArgument,
401: gcerrors.PermissionDenied,
403: gcerrors.PermissionDenied,
404: gcerrors.NotFound,
408: gcerrors.DeadlineExceeded,
429: gcerrors.ResourceExhausted,
500: gcerrors.Internal,
501: gcerrors.Unimplemented,
}
)
func init() {
secrets.DefaultURLMux().RegisterKeeper(Scheme, new(defaultDialer))
}
// Set holds Wire providers for this package.
var Set = wire.NewSet(
Dial,
wire.Struct(new(URLOpener), "Client"),
)
// defaultDialer dials Azure KeyVault from the environment on the first call to OpenKeeperURL.
type defaultDialer struct {
init sync.Once
opener *URLOpener
err error
}
func (o *defaultDialer) OpenKeeperURL(ctx context.Context, u *url.URL) (*secrets.Keeper, error) {
o.init.Do(func() {
client, err := Dial()
if err != nil {
o.err = err
return
}
o.opener = &URLOpener{Client: client}
})
if o.err != nil {
return nil, fmt.Errorf("open keeper %v: failed to Dial default KeyVault: %v", u, o.err)
}
return o.opener.OpenKeeperURL(ctx, u)
}
// Scheme is the URL scheme azurekeyvault registers its URLOpener under on secrets.DefaultMux.
const Scheme = "azurekeyvault"
// URLOpener opens Azure KeyVault URLs like
// "azurekeyvault://{keyvault-name}.vault.azure.net/keys/{key-name}/{key-version}?algorithm=RSA-OAEP-256".
//
// The "azurekeyvault" URL scheme is replaced with "https" to construct an Azure
// Key Vault keyID, as described in https://docs.microsoft.com/en-us/azure/key-vault/about-keys-secrets-and-certificates.
// The "/{key-version}"" suffix is optional; it defaults to the latest version.
//
// The "algorithm" query parameter sets the algorithm to use; see
// https://docs.microsoft.com/en-us/rest/api/keyvault/encrypt/encrypt#jsonwebkeyencryptionalgorithm
// for supported algorithms. It defaults to "RSA-OAEP-256".
//
// No other query parameters are supported.
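//
// For example (illustrative), the URL
// "azurekeyvault://mykeyvault.vault.azure.net/keys/mykey" opens a keeper for
// the latest version of "mykey" using the default algorithm.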
type URLOpener struct {
// Client must be set to a non-nil value.
Client *keyvault.BaseClient
// Options specifies the options to pass to OpenKeeper.
Options KeeperOptions
}
// OpenKeeperURL opens an Azure KeyVault Keeper based on u.
func (o *URLOpener) OpenKeeperURL(ctx context.Context, u *url.URL) (*secrets.Keeper, error) {
q := u.Query()
algorithm := q.Get("algorithm")
if algorithm != "" {
o.Options.Algorithm = keyvault.JSONWebKeyEncryptionAlgorithm(algorithm)
q.Del("algorithm")
}
for param := range q {
return nil, fmt.Errorf("open keeper %v: invalid query parameter %q", u, param)
}
keyID := "https://" + path.Join(u.Host, u.Path)
return OpenKeeper(o.Client, keyID, &o.Options)
}
type keeper struct {
client *keyvault.BaseClient
keyVaultURI string
keyName string
keyVersion string
options *KeeperOptions
}
// KeeperOptions provides configuration options for encryption/decryption operations.
type KeeperOptions struct {
// Algorithm sets the encryption algorithm used.
// Defaults to "RSA-OAEP-256".
// See https://docs.microsoft.com/en-us/rest/api/keyvault/encrypt/encrypt#jsonwebkeyencryptionalgorithm
// for more details.
Algorithm keyvault.JSONWebKeyEncryptionAlgorithm
}
// Dial gets a new *keyvault.BaseClient, see https://godoc.org/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault#BaseClient
func Dial() (*keyvault.BaseClient, error) {
auth, err := auth.NewAuthorizerFromEnvironment()
if err != nil {
return nil, err
}
client := keyvault.NewWithoutDefaults()
client.Authorizer = auth
client.Sender = autorest.NewClientWithUserAgent(useragent.AzureUserAgentPrefix("secrets"))
return &client, nil
}
var (
// Note that the last binding may be just a key, or key/version.
keyIDRE = regexp.MustCompile(`^(https://.+\.vault\.azure\.net/)keys/(.+)$`)
)
// OpenKeeper returns a *secrets.Keeper that uses Azure keyVault.
//
// client is a *keyvault.BaseClient instance, see https://godoc.org/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault#BaseClient.
//
// keyID is a Azure Key Vault key identifier like "https://{keyvault-name}.vault.azure.net/keys/{key-name}/{key-version}".
// The "/{key-version}" suffix is optional; it defaults to the latest version.
// See https://docs.microsoft.com/en-us/azure/key-vault/about-keys-secrets-and-certificates
// for more details.
func OpenKeeper(client *keyvault.BaseClient, keyID string, opts *KeeperOptions) (*secrets.Keeper, error) {
drv, err := openKeeper(client, keyID, opts)
if err != nil {
return nil, err
}
return secrets.NewKeeper(drv), nil
}
func openKeeper(client *keyvault.BaseClient, keyID string, opts *KeeperOptions) (*keeper, error) {
if opts == nil {
opts = &KeeperOptions{}
}
if opts.Algorithm == "" {
opts.Algorithm = keyvault.RSAOAEP256
}
matches := keyIDRE.FindStringSubmatch(keyID)
if len(matches) != 3 {
return nil, fmt.Errorf("invalid keyID %q; must match %v %v", keyID, keyIDRE, matches)
}
// matches[0] is the whole keyID, [1] is the keyVaultURI, and [2] is the key or the key/version.
keyVaultURI := matches[1]
parts := strings.SplitN(matches[2], "/", 2)
keyName := parts[0]
var keyVersion string
if len(parts) > 1 {
keyVersion = parts[1]
}
return &keeper{
client: client,
keyVaultURI: keyVaultURI,
keyName: keyName,
keyVersion: keyVersion,
options: opts,
}, nil
}
// Encrypt encrypts the plaintext into a ciphertext.
func (k *keeper) Encrypt(ctx context.Context, plaintext []byte) ([]byte, error) {
b64Text := base64.StdEncoding.EncodeToString(plaintext)
keyOpsResult, err := k.client.Encrypt(ctx, k.keyVaultURI, k.keyName, k.keyVersion, keyvault.KeyOperationsParameters{
Algorithm: keyvault.JSONWebKeyEncryptionAlgorithm(k.options.Algorithm),
Value: &b64Text,
})
if err != nil {
return nil, err
}
return []byte(*keyOpsResult.Result), nil
}
// Decrypt decrypts the ciphertext into a plaintext.
func (k *keeper) Decrypt(ctx context.Context, ciphertext []byte) ([]byte, error) {
cipherval := string(ciphertext)
keyOpsResult, err := k.client.Decrypt(ctx, k.keyVaultURI, k.keyName, k.keyVersion, keyvault.KeyOperationsParameters{
Algorithm: keyvault.JSONWebKeyEncryptionAlgorithm(k.options.Algorithm),
Value: &cipherval,
})
if err != nil {
return nil, err
}
return base64.StdEncoding.DecodeString(*keyOpsResult.Result)
}
// Close implements driver.Keeper.Close.
func (k *keeper) Close() error { return nil }
// ErrorAs implements driver.Keeper.ErrorAs.
func (k *keeper) ErrorAs(err error, i interface{}) bool {
e, ok := err.(autorest.DetailedError)
if !ok {
return false
}
p, ok := i.(*autorest.DetailedError)
if !ok {
return false
}
*p = e
return true
}
// ErrorCode implements driver.ErrorCode.
func (k *keeper) ErrorCode(err error) gcerrors.ErrorCode {
de, ok := err.(autorest.DetailedError)
if !ok {
return gcerr.Unknown
}
ec, ok := errorCodeMap[de.StatusCode.(int)]
if !ok {
return gcerr.Unknown
}
return ec
}
| 1 | 18,909 | nit: `AZURE_KEYVAULT_AUTH_VIA_CLI` is defined by us, so it might be helpful to differentiate it from azure's env names. Maybe starts with `GOCDK`. | google-go-cloud | go |
@@ -34,10 +34,10 @@ type Validator struct {
}
// NewValidator return Validator instance
-func NewValidator(sessionManager session.Manager, extractor identity.Extractor) *Validator {
+func NewValidator(sessionStorage *session.Storage, extractor identity.Extractor) *Validator {
return &Validator{
clientMap: &clientMap{
- sessionManager: sessionManager,
+ sessions: sessionStorage,
sessionClientIDs: make(map[session.SessionID]int),
sessionMapLock: sync.Mutex{},
}, | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package session
import (
"sync"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/session"
)
// SignaturePrefix is used to prefix with each session string before calculating signature or extracting identity
const SignaturePrefix = "MystVpnSessionId:"
// Validator structure that keeps attributes needed Validator operations
type Validator struct {
clientMap *clientMap
identityExtractor identity.Extractor
}
// NewValidator return Validator instance
func NewValidator(sessionManager session.Manager, extractor identity.Extractor) *Validator {
return &Validator{
clientMap: &clientMap{
sessionManager: sessionManager,
sessionClientIDs: make(map[session.SessionID]int),
sessionMapLock: sync.Mutex{},
},
identityExtractor: extractor,
}
}
// Validate provides glue code for the openvpn management interface to validate an incoming client login request.
// It expects the session id as the username and a session signature signed by the client as the password.
func (v *Validator) Validate(clientID int, sessionString, signatureString string) (bool, error) {
sessionID := session.SessionID(sessionString)
currentSession, found, err := v.clientMap.FindClientSession(clientID, sessionID)
if err != nil {
return false, err
}
if !found {
v.clientMap.UpdateClientSession(clientID, sessionID)
}
signature := identity.SignatureBase64(signatureString)
extractedIdentity, err := v.identityExtractor.Extract([]byte(SignaturePrefix+sessionString), signature)
if err != nil {
return false, err
}
return currentSession.ConsumerID == extractedIdentity, nil
}
// Cleanup removes session from underlying session managers
func (v *Validator) Cleanup(sessionString string) error {
sessionID := session.SessionID(sessionString)
return v.clientMap.RemoveSession(sessionID)
}
| 1 | 11,901 | Depend on interfaces not on structures | mysteriumnetwork-node | go |
@@ -180,7 +180,7 @@ void signalHandler(int sig) {
case SIGTERM:
FLOG_INFO("Signal %d(%s) received, stopping this server", sig, ::strsignal(sig));
if (gStorageServer) {
- gStorageServer->stop();
+ gStorageServer->notifyStop();
}
break;
default: | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include <folly/ssl/Init.h>
#include <thrift/lib/cpp2/server/ThriftServer.h>
#include "common/base/Base.h"
#include "common/base/SignalHandler.h"
#include "common/network/NetworkUtils.h"
#include "common/process/ProcessUtils.h"
#include "common/time/TimezoneInfo.h"
#include "storage/StorageServer.h"
#include "version/Version.h"
DEFINE_string(local_ip, "", "IP address which is used to identify this server");
DEFINE_string(data_path,
"",
"Root data path, multi paths should be split by comma."
"For rocksdb engine, one path one instance.");
DEFINE_string(wal_path,
"",
"Nebula wal path. By default, wal will be stored as a sibling of "
"rocksdb data.");
DEFINE_string(listener_path,
"",
"Path for listener, only wal will be saved. "
"If it is not empty, data_path will not take effect.");
DEFINE_bool(daemonize, true, "Whether to run the process as a daemon");
DEFINE_string(pid_file, "pids/nebula-storaged.pid", "File to hold the process id");
DEFINE_string(meta_server_addrs,
"",
"list of meta server addresses,"
"the format looks like ip1:port1, ip2:port2, ip3:port3");
DECLARE_int32(port);
using nebula::operator<<;
using nebula::HostAddr;
using nebula::ProcessUtils;
using nebula::Status;
using nebula::StatusOr;
using nebula::network::NetworkUtils;
static void signalHandler(int sig);
static Status setupSignalHandler();
extern Status setupLogging();
#if defined(__x86_64__)
extern Status setupBreakpad();
#endif
std::unique_ptr<nebula::storage::StorageServer> gStorageServer;
int main(int argc, char *argv[]) {
google::SetVersionString(nebula::versionString());
// Detect if the server has already been started
// Check pid before glog init, in case the user starts the daemon twice;
// the 2nd instance would make the 1st unable to output logs anymore
gflags::ParseCommandLineFlags(&argc, &argv, false);
// Setup logging
auto status = setupLogging();
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
#if defined(__x86_64__)
status = setupBreakpad();
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
#endif
auto pidPath = FLAGS_pid_file;
status = ProcessUtils::isPidAvailable(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
folly::init(&argc, &argv, true);
if (FLAGS_enable_ssl || FLAGS_enable_meta_ssl) {
folly::ssl::init();
}
if (FLAGS_daemonize) {
google::SetStderrLogging(google::FATAL);
} else {
google::SetStderrLogging(google::INFO);
}
if (FLAGS_daemonize) {
status = ProcessUtils::daemonize(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
} else {
// Write the current pid into the pid file
status = ProcessUtils::makePidFile(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
}
if (FLAGS_data_path.empty()) {
LOG(ERROR) << "Storage Data Path should not be empty";
return EXIT_FAILURE;
}
std::string hostName;
if (FLAGS_local_ip.empty()) {
hostName = nebula::network::NetworkUtils::getHostname();
} else {
status = NetworkUtils::validateHostOrIp(FLAGS_local_ip);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
hostName = FLAGS_local_ip;
}
nebula::HostAddr localhost{hostName, FLAGS_port};
LOG(INFO) << "localhost = " << localhost;
auto metaAddrsRet = nebula::network::NetworkUtils::toHosts(FLAGS_meta_server_addrs);
if (!metaAddrsRet.ok() || metaAddrsRet.value().empty()) {
LOG(ERROR) << "Can't get metaServer address, status:" << metaAddrsRet.status()
<< ", FLAGS_meta_server_addrs:" << FLAGS_meta_server_addrs;
return EXIT_FAILURE;
}
std::vector<std::string> paths;
folly::split(",", FLAGS_data_path, paths, true);
std::transform(paths.begin(), paths.end(), paths.begin(), [](auto &p) {
return folly::trimWhitespace(p).str();
});
if (paths.empty()) {
LOG(ERROR) << "Bad data_path format:" << FLAGS_data_path;
return EXIT_FAILURE;
}
// Setup the signal handlers
status = setupSignalHandler();
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
// Initialize the global timezone, it's only used for datetime type compute
// won't affect the process timezone.
status = nebula::time::Timezone::initializeGlobalTimezone();
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
gStorageServer = std::make_unique<nebula::storage::StorageServer>(
localhost, metaAddrsRet.value(), paths, FLAGS_wal_path, FLAGS_listener_path);
if (!gStorageServer->start()) {
LOG(ERROR) << "Storage server start failed";
gStorageServer->stop();
return EXIT_FAILURE;
}
gStorageServer->waitUntilStop();
LOG(INFO) << "The storage Daemon stopped";
return EXIT_SUCCESS;
}
Status setupSignalHandler() {
return nebula::SignalHandler::install(
{SIGINT, SIGTERM},
[](nebula::SignalHandler::GeneralSignalInfo *info) { signalHandler(info->sig()); });
}
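// signalHandler logs the received signal; on SIGINT/SIGTERM it asks the
// running storage server to stop, and all other signals are ignored.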
void signalHandler(int sig) {
switch (sig) {
case SIGINT:
case SIGTERM:
FLOG_INFO("Signal %d(%s) received, stopping this server", sig, ::strsignal(sig));
if (gStorageServer) {
gStorageServer->stop();
}
break;
default:
FLOG_ERROR("Signal %d(%s) received but ignored", sig, ::strsignal(sig));
}
}
| 1 | 32,644 | This fix looks good. My concern, don't forget to fix metad too! | vesoft-inc-nebula | cpp |
@@ -196,13 +196,15 @@ func makeMDJournalWithIDJournal(
}
if earliest != nil {
- if earliest.BID() != latest.BID() {
+ if earliest.BID() != latest.BID() &&
+ !(earliest.BID() == NullBranchID &&
+ latest.BID() == LocalSquashBranchID) {
return nil, fmt.Errorf(
"earliest.BID=%s != latest.BID=%s",
earliest.BID(), latest.BID())
}
- log.CDebugf(nil, "Initializing with branch ID %s", earliest.BID())
- journal.branchID = earliest.BID()
+ log.CDebugf(nil, "Initializing with branch ID %s", latest.BID())
+ journal.branchID = latest.BID()
}
return &journal, nil | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/keybase/client/go/logger"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/tlf"
"golang.org/x/net/context"
"github.com/keybase/client/go/protocol/keybase1"
)
// ImmutableBareRootMetadata is a thin wrapper around a
// BareRootMetadata and an ExtraMetadata that takes ownership of it
// and does not ever modify it again. Thus, its MdID can be calculated
// and stored along with a local timestamp. ImmutableBareRootMetadata
// objects can be assumed to never alias a (modifiable)
// BareRootMetadata.
//
// Note that crypto.MakeMdID() on an ImmutableBareRootMetadata will
// compute the wrong result, since anonymous fields of interface type
// are not encoded inline by the codec. Use
// crypto.MakeMdID(ibrmd.BareRootMetadata) instead.
//
// TODO: Move this to bare_root_metadata.go if it's used in more
// places.
type ImmutableBareRootMetadata struct {
BareRootMetadata
extra ExtraMetadata
mdID MdID
localTimestamp time.Time
}
// MakeImmutableBareRootMetadata makes a new ImmutableBareRootMetadata
// from the given BareRootMetadata and its corresponding MdID.
func MakeImmutableBareRootMetadata(
rmd BareRootMetadata, extra ExtraMetadata, mdID MdID,
localTimestamp time.Time) ImmutableBareRootMetadata {
if mdID == (MdID{}) {
panic("zero mdID passed to MakeImmutableBareRootMetadata")
}
return ImmutableBareRootMetadata{rmd, extra, mdID, localTimestamp}
}
// MakeBareTlfHandleWithExtra makes a BareTlfHandle for this
// ImmutableBareRootMetadata. Should be used only by servers and MDOps.
func (ibrmd ImmutableBareRootMetadata) MakeBareTlfHandleWithExtra() (
tlf.Handle, error) {
return ibrmd.BareRootMetadata.MakeBareTlfHandle(ibrmd.extra)
}
// mdJournal stores a single ordered list of metadata IDs for a (TLF,
// user, device) tuple, along with the associated metadata objects, in
// flat files on disk.
//
// The directory layout looks like:
//
// dir/md_journal/EARLIEST
// dir/md_journal/LATEST
// dir/md_journal/0...001
// dir/md_journal/0...002
// dir/md_journal/0...fff
// dir/mds/0100/0...01/data
// dir/mds/0100/0...01/info.json
// ...
// dir/mds/01ff/f...ff/data
// dir/mds/01ff/f...ff/info.json
// dir/wkbv3/0100...01
// ...
// dir/wkbv3/0100...ff
// dir/rkbv3/0100...01
// ...
// dir/rkbv3/0100...ff
//
// There's a single journal subdirectory; the journal ordinals are
// just MetadataRevisions, and the journal entries are just MdIDs.
//
// The Metadata objects are stored separately in dir/mds. Each MD has
// its own subdirectory with its ID truncated to 17 bytes (34
// characters) as a name. The MD subdirectories are splayed over (# of
// possible hash types) * 256 subdirectories -- one byte for the hash
// type (currently only one) plus the first byte of the hash data --
// using the first four characters of the name to keep the number of
// directories in dir itself to a manageable number, similar to git.
// Each block directory has data, which is the raw MD data that should
// hash to the MD ID, and info.json, which contains the version and
// timestamp info for that MD. Future versions of the journal might
// add more files to this directory; if any code is written to move
// MDs around, it should be careful to preserve any unknown files in
// an MD directory.
//
// Writer (reader) key bundles for V3 metadata objects are stored
// separately in dir/wkbv3 (dir/rkbv3). The number of bundles is
// small, so no need to splay them.
//
// TODO: Garbage-collect unreferenced key bundles.
//
// The maximum number of characters added to the root dir by an MD
// journal is 50:
//
// /mds/01ff/f...(30 characters total)...ff/info.json
//
// This covers even the temporary files created in convertToBranch and
// resolveAndClear, which create paths like
//
// /md_journal123456789/0...(16 characters total)...001
//
// which have only 37 characters.
//
// mdJournal is not goroutine-safe, so any code that uses it must
// guarantee that only one goroutine at a time calls its functions.
type mdJournal struct {
// key is assumed to be the VerifyingKey of a device owned by
// uid, and both uid and key are assumed constant for the
// lifetime of this object.
uid keybase1.UID
key kbfscrypto.VerifyingKey
codec kbfscodec.Codec
crypto cryptoPure
clock Clock
tlfID tlf.ID
mdVer MetadataVer
dir string
log logger.Logger
deferLog logger.Logger
j mdIDJournal
// This doesn't need to be persisted, even if the journal
// becomes empty, since on a restart the branch ID is
// retrieved from the server (via GetUnmergedForTLF).
branchID BranchID
// Set only when the journal becomes empty due to
// flushing. This doesn't need to be persisted for the same
// reason as branchID.
lastMdID MdID
}
func makeMDJournalWithIDJournal(
uid keybase1.UID, key kbfscrypto.VerifyingKey, codec kbfscodec.Codec,
crypto cryptoPure, clock Clock, tlfID tlf.ID,
mdVer MetadataVer, dir string, idJournal mdIDJournal,
log logger.Logger) (*mdJournal, error) {
if uid == keybase1.UID("") {
return nil, errors.New("Empty user")
}
if key == (kbfscrypto.VerifyingKey{}) {
return nil, errors.New("Empty verifying key")
}
deferLog := log.CloneWithAddedDepth(1)
journal := mdJournal{
uid: uid,
key: key,
codec: codec,
crypto: crypto,
clock: clock,
tlfID: tlfID,
mdVer: mdVer,
dir: dir,
log: log,
deferLog: deferLog,
j: idJournal,
}
_, earliest, _, _, err := journal.getEarliestWithExtra(false)
if err != nil {
return nil, err
}
latest, err := journal.getLatest(false)
if err != nil {
return nil, err
}
if (earliest == nil) != (latest == ImmutableBareRootMetadata{}) {
return nil, fmt.Errorf("has earliest=%t != has latest=%t",
earliest != nil,
latest != ImmutableBareRootMetadata{})
}
if earliest != nil {
if earliest.BID() != latest.BID() {
return nil, fmt.Errorf(
"earliest.BID=%s != latest.BID=%s",
earliest.BID(), latest.BID())
}
log.CDebugf(nil, "Initializing with branch ID %s", earliest.BID())
journal.branchID = earliest.BID()
}
return &journal, nil
}
func makeMDJournal(
uid keybase1.UID, key kbfscrypto.VerifyingKey, codec kbfscodec.Codec,
crypto cryptoPure, clock Clock, tlfID tlf.ID,
mdVer MetadataVer, dir string,
log logger.Logger) (*mdJournal, error) {
journalDir := filepath.Join(dir, "md_journal")
return makeMDJournalWithIDJournal(
uid, key, codec, crypto, clock, tlfID, mdVer, dir,
makeMdIDJournal(codec, journalDir), log)
}
// The functions below are for building various paths.
func (j mdJournal) mdsPath() string {
return filepath.Join(j.dir, "mds")
}
// The final components of the paths below are truncated to 34
// characters, which corresponds to 16 random bytes (since the first
// byte is a hash type) or 128 random bits, which means that the
// expected number of MDs generated before getting a path collision is
// 2^64 (see
// https://en.wikipedia.org/wiki/Birthday_problem#Cast_as_a_collision_problem
// ). The full ID can be recovered just by hashing the data again with
// the same hash type.
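//
// Spelled out (just restating the numbers above, assuming the ID string is
// hex-encoded as in the layout examples): 34 hex characters encode 17 bytes,
// the first of which is the hash type, leaving 16 random bytes = 128 bits;
// the birthday bound then gives roughly sqrt(2^128) = 2^64 expected IDs
// before a collision.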
func (j mdJournal) writerKeyBundleV3Path(id TLFWriterKeyBundleID) string {
idStr := id.String()
return filepath.Join(j.dir, "wkbv3", idStr[:34])
}
func (j mdJournal) readerKeyBundleV3Path(id TLFReaderKeyBundleID) string {
idStr := id.String()
return filepath.Join(j.dir, "rkbv3", idStr[:34])
}
func (j mdJournal) mdPath(id MdID) string {
idStr := id.String()
return filepath.Join(j.mdsPath(), idStr[:4], idStr[4:34])
}
func (j mdJournal) mdDataPath(id MdID) string {
return filepath.Join(j.mdPath(id), "data")
}
func (j mdJournal) mdInfoPath(id MdID) string {
return filepath.Join(j.mdPath(id), "info.json")
}
// mdInfo is the structure stored in mdInfoPath(id).
//
// TODO: Handle unknown fields? We'd have to build a handler for this,
// since the Go JSON library doesn't support it natively.
type mdInfo struct {
Timestamp time.Time
Version MetadataVer
}
func (j mdJournal) getMDInfo(id MdID) (time.Time, MetadataVer, error) {
infoJSON, err := ioutil.ReadFile(j.mdInfoPath(id))
if err != nil {
return time.Time{}, MetadataVer(-1), err
}
var info mdInfo
err = json.Unmarshal(infoJSON, &info)
if err != nil {
return time.Time{}, MetadataVer(-1), err
}
return info.Timestamp, info.Version, nil
}
// putMDInfo assumes that the parent directory of j.mdInfoPath(id)
// (which is j.mdPath(id)) has already been created.
func (j mdJournal) putMDInfo(
id MdID, timestamp time.Time, version MetadataVer) error {
info := mdInfo{timestamp, version}
infoJSON, err := json.Marshal(info)
if err != nil {
return err
}
return ioutil.WriteFile(j.mdInfoPath(id), infoJSON, 0600)
}
// getExtraMetadata gets the extra metadata corresponding to the given
// IDs, if any, after checking them.
func (j mdJournal) getExtraMetadata(
wkbID TLFWriterKeyBundleID, rkbID TLFReaderKeyBundleID) (
ExtraMetadata, error) {
if (wkbID == TLFWriterKeyBundleID{}) !=
(rkbID == TLFReaderKeyBundleID{}) {
return nil, fmt.Errorf(
"wkbID is empty (%t) != rkbID is empty (%t)",
wkbID == TLFWriterKeyBundleID{},
rkbID == TLFReaderKeyBundleID{})
}
if wkbID == (TLFWriterKeyBundleID{}) {
return nil, nil
}
var wkb TLFWriterKeyBundleV3
err := kbfscodec.DeserializeFromFile(
j.codec, j.writerKeyBundleV3Path(wkbID), &wkb)
if err != nil {
return nil, err
}
var rkb TLFReaderKeyBundleV3
err = kbfscodec.DeserializeFromFile(
j.codec, j.readerKeyBundleV3Path(rkbID), &rkb)
if err != nil {
return nil, err
}
err = checkKeyBundlesV3(j.crypto, wkbID, rkbID, &wkb, &rkb)
if err != nil {
return nil, err
}
return &ExtraMetadataV3{wkb: &wkb, rkb: &rkb}, nil
}
func (j mdJournal) putExtraMetadata(
rmd BareRootMetadata, extra ExtraMetadata) error {
if extra == nil {
return nil
}
wkbID := rmd.GetTLFWriterKeyBundleID()
if wkbID == (TLFWriterKeyBundleID{}) {
panic("writer key bundle ID is empty")
}
rkbID := rmd.GetTLFReaderKeyBundleID()
if rkbID == (TLFReaderKeyBundleID{}) {
panic("reader key bundle ID is empty")
}
extraV3, ok := extra.(*ExtraMetadataV3)
if !ok {
return errors.New("Invalid extra metadata")
}
err := checkKeyBundlesV3(
j.crypto, wkbID, rkbID, extraV3.wkb, extraV3.rkb)
if err != nil {
return err
}
err = kbfscodec.SerializeToFile(
j.codec, extraV3.wkb, j.writerKeyBundleV3Path(wkbID))
if err != nil {
return err
}
err = kbfscodec.SerializeToFile(
j.codec, extraV3.rkb, j.readerKeyBundleV3Path(rkbID))
if err != nil {
return err
}
return nil
}
// getMDAndExtra verifies the MD data, the writer signature (but not
// the key), and the extra metadata for the given ID and returns
// them. It also returns the last-modified timestamp of the
// file. verifyBranchID should be false only when called from
// makeMDJournal, i.e. when figuring out what to set j.branchID in the
// first place.
//
// It returns a MutableBareRootMetadata so that it can be put in a
// RootMetadataSigned object.
func (j mdJournal) getMDAndExtra(id MdID, verifyBranchID bool) (
MutableBareRootMetadata, ExtraMetadata, time.Time, error) {
// Read info.
timestamp, version, err := j.getMDInfo(id)
if err != nil {
return nil, nil, time.Time{}, err
}
// Read data.
data, err := ioutil.ReadFile(j.mdDataPath(id))
if err != nil {
return nil, nil, time.Time{}, err
}
rmd, err := DecodeRootMetadata(
j.codec, j.tlfID, version, j.mdVer, data)
if err != nil {
return nil, nil, time.Time{}, err
}
// Check integrity.
mdID, err := j.crypto.MakeMdID(rmd)
if err != nil {
return nil, nil, time.Time{}, err
}
if mdID != id {
return nil, nil, time.Time{}, fmt.Errorf(
"Metadata ID mismatch: expected %s, got %s", id, mdID)
}
err = rmd.IsLastModifiedBy(j.uid, j.key)
if err != nil {
return nil, nil, time.Time{}, err
}
extra, err := j.getExtraMetadata(
rmd.GetTLFWriterKeyBundleID(), rmd.GetTLFReaderKeyBundleID())
if err != nil {
return nil, nil, time.Time{}, err
}
err = rmd.IsValidAndSigned(j.codec, j.crypto, extra)
if err != nil {
return nil, nil, time.Time{}, err
}
if verifyBranchID && rmd.BID() != j.branchID {
return nil, nil, time.Time{}, fmt.Errorf(
"Branch ID mismatch: expected %s, got %s",
j.branchID, rmd.BID())
}
return rmd, extra, timestamp, nil
}
// putMD stores the given metadata under its ID, if it's not already
// stored. The extra metadata is put separately, since sometimes,
// (e.g., when converting to a branch) we don't need to put it.
func (j mdJournal) putMD(rmd BareRootMetadata) (MdID, error) {
err := rmd.IsLastModifiedBy(j.uid, j.key)
if err != nil {
return MdID{}, err
}
id, err := j.crypto.MakeMdID(rmd)
if err != nil {
return MdID{}, err
}
_, _, _, err = j.getMDAndExtra(id, true)
if os.IsNotExist(err) {
// Continue on.
} else if err != nil {
return MdID{}, err
} else {
// Entry exists, so nothing else to do.
return MdID{}, nil
}
buf, err := j.codec.Encode(rmd)
if err != nil {
return MdID{}, err
}
err = os.MkdirAll(j.mdPath(id), 0700)
if err != nil {
return MdID{}, err
}
err = ioutil.WriteFile(j.mdDataPath(id), buf, 0600)
if err != nil {
return MdID{}, err
}
err = j.putMDInfo(id, j.clock.Now(), rmd.Version())
if err != nil {
return MdID{}, err
}
return id, nil
}
// removeMD removes the metadata (which must exist) with the given ID.
func (j *mdJournal) removeMD(id MdID) error {
path := j.mdPath(id)
err := os.RemoveAll(path)
if err != nil {
return err
}
// Remove the parent (splayed) directory (which should exist)
// if it's empty.
err = os.Remove(filepath.Dir(path))
if isExist(err) {
err = nil
}
return err
}
// getEarliestWithExtra returns a MutableBareRootMetadata so that it
// can be put in a RootMetadataSigned object.
func (j mdJournal) getEarliestWithExtra(verifyBranchID bool) (
MdID, MutableBareRootMetadata, ExtraMetadata, time.Time, error) {
entry, exists, err := j.j.getEarliestEntry()
if err != nil {
return MdID{}, nil, nil, time.Time{}, err
}
if !exists {
return MdID{}, nil, nil, time.Time{}, nil
}
earliestID := entry.ID
earliest, extra, timestamp, err :=
j.getMDAndExtra(earliestID, verifyBranchID)
if err != nil {
return MdID{}, nil, nil, time.Time{}, err
}
return earliestID, earliest, extra, timestamp, nil
}
func (j mdJournal) getLatest(verifyBranchID bool) (
ImmutableBareRootMetadata, error) {
entry, exists, err := j.j.getLatestEntry()
if err != nil {
return ImmutableBareRootMetadata{}, err
}
if !exists {
return ImmutableBareRootMetadata{}, nil
}
latestID := entry.ID
latest, extra, timestamp, err := j.getMDAndExtra(
latestID, verifyBranchID)
if err != nil {
return ImmutableBareRootMetadata{}, err
}
return MakeImmutableBareRootMetadata(
latest, extra, latestID, timestamp), nil
}
func (j mdJournal) checkGetParams() (ImmutableBareRootMetadata, error) {
head, err := j.getLatest(true)
if err != nil {
return ImmutableBareRootMetadata{}, err
}
if head == (ImmutableBareRootMetadata{}) {
return ImmutableBareRootMetadata{}, nil
}
ok, err := isReader(j.uid, head.BareRootMetadata, head.extra)
if err != nil {
return ImmutableBareRootMetadata{}, err
}
if !ok {
// TODO: Use a non-server error.
return ImmutableBareRootMetadata{}, MDServerErrorUnauthorized{}
}
return head, nil
}
func (j *mdJournal) convertToBranch(
ctx context.Context, bid BranchID, signer kbfscrypto.Signer,
codec kbfscodec.Codec, tlfID tlf.ID, mdcache MDCache) (err error) {
if j.branchID != NullBranchID {
return fmt.Errorf(
"convertToBranch called with j.branchID=%s", j.branchID)
}
if bid == NullBranchID {
return fmt.Errorf(
"convertToBranch called with null branchID")
}
earliestRevision, err := j.j.readEarliestRevision()
if err != nil {
return err
}
latestRevision, err := j.j.readLatestRevision()
if err != nil {
return err
}
j.log.CDebugf(
ctx, "rewriting MDs %s to %s", earliestRevision, latestRevision)
_, allEntries, err := j.j.getEntryRange(
earliestRevision, latestRevision)
if err != nil {
return err
}
j.log.CDebugf(ctx, "New branch ID=%s", bid)
journalTempDir, err := ioutil.TempDir(j.dir, "md_journal")
if err != nil {
return err
}
j.log.CDebugf(ctx, "Using temp dir %s for rewriting", journalTempDir)
mdsToRemove := make([]MdID, 0, len(allEntries))
defer func() {
j.log.CDebugf(ctx, "Removing temp dir %s and %d old MDs",
journalTempDir, len(mdsToRemove))
removeErr := os.RemoveAll(journalTempDir)
if removeErr != nil {
j.log.CWarningf(ctx,
"Error when removing temp dir %s: %v",
journalTempDir, removeErr)
}
// Garbage-collect the unnecessary MD entries. TODO: we'll
// eventually need a sweeper to clean up entries left behind
// if we crash here.
for _, id := range mdsToRemove {
removeErr := j.removeMD(id)
if removeErr != nil {
j.log.CWarningf(ctx, "Error when removing old MD %s: %v",
id, removeErr)
}
}
}()
tempJournal := makeMdIDJournal(j.codec, journalTempDir)
var prevID MdID
for i, entry := range allEntries {
brmd, _, ts, err := j.getMDAndExtra(entry.ID, true)
if err != nil {
return err
}
brmd.SetUnmerged()
brmd.SetBranchID(bid)
// Re-sign the writer metadata internally, since we
// changed it.
err = brmd.SignWriterMetadataInternally(ctx, j.codec, signer)
if err != nil {
return err
}
j.log.CDebugf(ctx, "Old prev root of rev=%s is %s",
brmd.RevisionNumber(), brmd.GetPrevRoot())
if i > 0 {
j.log.CDebugf(ctx, "Changing prev root of rev=%s to %s",
brmd.RevisionNumber(), prevID)
brmd.SetPrevRoot(prevID)
}
// TODO: this rewrites the file, and so the modification time
// no longer tracks when exactly the original operation is
// done, so future ImmutableBareMetadatas for this MD will
// have a slightly wrong localTimestamp. Instead, we might
// want to pass in the timestamp and do an explicit
// os.Chtimes() on the file after writing it.
newID, err := j.putMD(brmd)
if err != nil {
return err
}
mdsToRemove = append(mdsToRemove, newID)
// Preserve unknown fields from the old journal.
newEntry := entry
newEntry.ID = newID
err = tempJournal.append(brmd.RevisionNumber(), newEntry)
if err != nil {
return err
}
prevID = newID
// If possible, replace the old RMD in the cache. If it's not
// already in the cache, don't bother adding it, as that will
// just evict something incorrectly. If it's been replaced by
// the REAL commit from the master branch due to a race, don't
// clobber that real commit. TODO: Don't replace the MD until
// we know for sure that the branch conversion succeeds
// (however, the Replace doesn't affect correctness since the
// original commit will be read from disk instead of the cache
// in the event of a conversion failure).
oldIrmd, err := mdcache.Get(
tlfID, brmd.RevisionNumber(), NullBranchID)
if err == nil && entry.ID == oldIrmd.mdID {
newRmd, err := oldIrmd.deepCopy(codec)
if err != nil {
return err
}
newRmd.bareMd = brmd
// Everything else is the same.
err = mdcache.Replace(
MakeImmutableRootMetadata(newRmd,
oldIrmd.LastModifyingWriterVerifyingKey(),
newID, ts),
NullBranchID)
if err != nil {
return err
}
}
j.log.CDebugf(ctx, "Changing ID for rev=%s from %s to %s",
brmd.RevisionNumber(), entry.ID, newID)
}
// TODO: Do the below atomically on the filesystem
// level. Specifically, make "md_journal" always be a symlink,
// and then perform the swap by atomically changing the
// symlink to point to the new journal directory.
oldJournalTempDir := journalTempDir + ".old"
dir, err := j.j.move(oldJournalTempDir)
if err != nil {
return err
}
j.log.CDebugf(ctx, "Moved old journal from %s to %s",
dir, oldJournalTempDir)
newJournalOldDir, err := tempJournal.move(dir)
if err != nil {
return err
}
j.log.CDebugf(ctx, "Moved new journal from %s to %s",
newJournalOldDir, dir)
// Make the defer block above remove oldJournalTempDir.
journalTempDir = oldJournalTempDir
mdsToRemove = nil
for _, entry := range allEntries {
mdsToRemove = append(mdsToRemove, entry.ID)
}
j.j = tempJournal
j.branchID = bid
return nil
}
// getNextEntryToFlush returns the info for the next journal entry to
// flush, if it exists, and its revision is less than end. If there is
// no next journal entry to flush, the returned MdID will be zero, and
// the returned *RootMetadataSigned will be nil.
func (j mdJournal) getNextEntryToFlush(
ctx context.Context, end MetadataRevision, signer kbfscrypto.Signer) (
MdID, *RootMetadataSigned, ExtraMetadata, error) {
mdID, rmd, extra, timestamp, err := j.getEarliestWithExtra(true)
if err != nil {
return MdID{}, nil, nil, err
}
if rmd == nil || rmd.RevisionNumber() >= end {
return MdID{}, nil, nil, nil
}
rmds, err := SignBareRootMetadata(
ctx, j.codec, signer, signer, rmd, timestamp)
if err != nil {
return MdID{}, nil, nil, err
}
return mdID, rmds, extra, nil
}
func (j *mdJournal) removeFlushedEntry(
ctx context.Context, mdID MdID, rmds *RootMetadataSigned) error {
rmdID, rmd, _, _, err := j.getEarliestWithExtra(true)
if err != nil {
return err
}
if rmd == nil {
return errors.New("mdJournal unexpectedly empty")
}
if mdID != rmdID {
return fmt.Errorf("Expected mdID %s, got %s", mdID, rmdID)
}
eq, err := kbfscodec.Equal(j.codec, rmd, rmds.MD)
if err != nil {
return err
}
if !eq {
return errors.New(
"Given RootMetadataSigned doesn't match earliest")
}
empty, err := j.j.removeEarliest()
if err != nil {
return err
}
// Since the journal is now empty, set lastMdID.
if empty {
j.log.CDebugf(ctx,
"Journal is now empty; saving last MdID=%s", mdID)
j.lastMdID = mdID
}
// Garbage-collect the old entry. TODO: we'll eventually need a
// sweeper to clean up entries left behind if we crash here.
return j.removeMD(mdID)
}
func getMdID(ctx context.Context, mdserver MDServer, crypto cryptoPure,
tlfID tlf.ID, bid BranchID, mStatus MergeStatus,
revision MetadataRevision) (MdID, error) {
rmdses, err := mdserver.GetRange(
ctx, tlfID, bid, mStatus, revision, revision)
if err != nil {
return MdID{}, err
} else if len(rmdses) == 0 {
return MdID{}, nil
} else if len(rmdses) > 1 {
return MdID{}, fmt.Errorf(
"Got more than one object when trying to get rev=%d for branch %s of TLF %s",
revision, bid, tlfID)
}
return crypto.MakeMdID(rmdses[0].MD)
}
// All functions below are public functions.
func (j mdJournal) readEarliestRevision() (MetadataRevision, error) {
return j.j.readEarliestRevision()
}
func (j mdJournal) readLatestRevision() (MetadataRevision, error) {
return j.j.readLatestRevision()
}
func (j mdJournal) length() (uint64, error) {
return j.j.length()
}
func (j mdJournal) end() (MetadataRevision, error) {
return j.j.end()
}
func (j mdJournal) getBranchID() BranchID {
return j.branchID
}
func (j mdJournal) getHead() (ImmutableBareRootMetadata, error) {
return j.checkGetParams()
}
func (j mdJournal) getRange(start, stop MetadataRevision) (
[]ImmutableBareRootMetadata, error) {
_, err := j.checkGetParams()
if err != nil {
return nil, err
}
realStart, entries, err := j.j.getEntryRange(start, stop)
if err != nil {
return nil, err
}
var ibrmds []ImmutableBareRootMetadata
for i, entry := range entries {
expectedRevision := realStart + MetadataRevision(i)
brmd, extra, ts, err := j.getMDAndExtra(entry.ID, true)
if err != nil {
return nil, err
}
if expectedRevision != brmd.RevisionNumber() {
panic(fmt.Errorf("expected revision %v, got %v",
expectedRevision, brmd.RevisionNumber()))
}
ibrmd := MakeImmutableBareRootMetadata(
brmd, extra, entry.ID, ts)
ibrmds = append(ibrmds, ibrmd)
}
return ibrmds, nil
}
// MDJournalConflictError is an error that is returned when a put
// detects a rewritten journal.
type MDJournalConflictError struct{}
func (e MDJournalConflictError) Error() string {
return "MD journal conflict error"
}
// put verifies and stores the given RootMetadata in the journal,
// modifying it as needed. In particular, there are four cases:
//
// Merged
// ------
// rmd is merged. If the journal is empty, then rmd becomes the
// initial entry. Otherwise, if the journal has been converted to a
// branch, then an MDJournalConflictError error is returned, and the
// caller is expected to set the unmerged bit and retry (see case
// Unmerged-1). Otherwise, either rmd must be the successor to the
// journal's head, in which case it is appended, or it must have the
// same revision number as the journal's head, in which case it
// replaces the journal's head. (This is necessary since if a journal
// put is cancelled and an error is returned, it still happens, and so
// we want the retried put (if any) to not conflict with it.)
//
// Unmerged-1
// ----------
// rmd is unmerged and has a null branch ID. This happens when case
// Merged returns with MDJournalConflictError. In this case, the rmd's
// branch ID is set to the journal's branch ID and its prevRoot is set
// to the last known journal root. It doesn't matter if the journal is
// completely drained, since the branch ID and last known root are
// remembered in memory. However, since this cache isn't persisted to
// disk, we need case Unmerged-3. Similarly to case Merged, this case
// then also does append-or-replace.
//
// Unmerged-2
// ----------
// rmd is unmerged and has a non-null branch ID, and the journal was
// non-empty at some time during this process's lifetime. Similarly to
// case Merged, if the journal is empty, then rmd becomes the initial
// entry, and otherwise, this case does append-or-replace.
//
// Unmerged-3
// ----------
// rmd is unmerged and has a non-null branch ID, and the journal has
// always been empty during this process's lifetime. The branch ID is
// assumed to be correct, i.e. retrieved from the remote MDServer, and
// rmd becomes the initial entry.
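//
// As a rough summary of the cases above (a sketch only; the prose above is
// authoritative):
//
//   mStatus   rmd.BID()   journal state                               outcome
//   Merged    null        j.branchID == NullBranchID                  append, or replace head (initial entry if empty)
//   Merged    null        j.branchID != NullBranchID                  MDJournalConflictError
//   Unmerged  null        j.branchID != NullBranchID                  adopt j.branchID and last known root, then append/replace (Unmerged-1)
//   Unmerged  null        j.branchID == NullBranchID                  error
//   Unmerged  non-null    head empty and j.branchID == NullBranchID   adopt rmd.BID(); rmd becomes the initial entry (Unmerged-3)
//   Unmerged  non-null    otherwise                                   append, or replace head (Unmerged-2)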
func (j *mdJournal) put(
ctx context.Context, signer kbfscrypto.Signer,
ekg encryptionKeyGetter, bsplit BlockSplitter, rmd *RootMetadata) (
mdID MdID, err error) {
j.log.CDebugf(ctx, "Putting MD for TLF=%s with rev=%s bid=%s",
rmd.TlfID(), rmd.Revision(), rmd.BID())
defer func() {
if err != nil {
j.deferLog.CDebugf(ctx,
"Put MD for TLF=%s with rev=%s bid=%s failed with %v",
rmd.TlfID(), rmd.Revision(), rmd.BID(), err)
}
}()
extra := rmd.extra
if extra == nil {
// TODO: This could fail if the key bundle isn't part
// of the journal. Always mandate that the extra field
// be plumbed through with a RootMetadata, and keep
// around a flag as to whether it should be sent up to
// the remote MDServer.
var err error
extra, err = j.getExtraMetadata(
rmd.bareMd.GetTLFWriterKeyBundleID(),
rmd.bareMd.GetTLFReaderKeyBundleID())
if err != nil {
return MdID{}, err
}
}
head, err := j.getLatest(true)
if err != nil {
return MdID{}, err
}
mStatus := rmd.MergedStatus()
// Make modifications for the Unmerged cases.
if mStatus == Unmerged {
var lastMdID MdID
if head == (ImmutableBareRootMetadata{}) {
lastMdID = j.lastMdID
} else {
lastMdID = head.mdID
}
if rmd.BID() == NullBranchID && j.branchID == NullBranchID {
return MdID{}, errors.New(
"Unmerged put with rmd.BID() == j.branchID == NullBranchID")
}
if head == (ImmutableBareRootMetadata{}) &&
j.branchID == NullBranchID {
// Case Unmerged-3.
j.branchID = rmd.BID()
// Revert branch ID if we encounter an error.
defer func() {
if err != nil {
j.branchID = NullBranchID
}
}()
} else if rmd.BID() == NullBranchID {
// Case Unmerged-1.
j.log.CDebugf(
ctx, "Changing branch ID to %s and prev root to %s for MD for TLF=%s with rev=%s",
j.branchID, lastMdID, rmd.TlfID(), rmd.Revision())
rmd.SetBranchID(j.branchID)
rmd.SetPrevRoot(lastMdID)
} else {
// Using de Morgan's laws, this branch is
// taken when both rmd.BID() is non-null, and
// either head is non-empty or j.branchID is
// non-empty. So this is most of case
// Unmerged-2, and there's nothing to do.
//
// The remaining part of case Unmerged-2,
// where rmd.BID() is non-null, head is empty,
// and j.branchID is empty, is an error case,
// handled below.
}
}
// The below is code common to all the cases.
if (mStatus == Merged) != (rmd.BID() == NullBranchID) {
return MdID{}, fmt.Errorf(
"mStatus=%s doesn't match bid=%s", mStatus, rmd.BID())
}
// If we're trying to push a merged MD onto a branch, return a
// conflict error so the caller can retry with an unmerged MD.
if mStatus == Merged && j.branchID != NullBranchID {
return MdID{}, MDJournalConflictError{}
}
if rmd.BID() != j.branchID {
return MdID{}, fmt.Errorf(
"Branch ID mismatch: expected %s, got %s",
j.branchID, rmd.BID())
}
// Check permissions and consistency with head, if it exists.
if head != (ImmutableBareRootMetadata{}) {
ok, err := isWriterOrValidRekey(
j.codec, j.uid, head.BareRootMetadata, rmd.bareMd,
head.extra, extra)
if err != nil {
return MdID{}, err
}
if !ok {
// TODO: Use a non-server error.
return MdID{}, MDServerErrorUnauthorized{}
}
// Consistency checks
if rmd.Revision() != head.RevisionNumber() {
err = head.CheckValidSuccessorForServer(
head.mdID, rmd.bareMd)
if err != nil {
return MdID{}, err
}
}
}
// Ensure that the block changes are properly unembedded.
if rmd.data.Changes.Info.BlockPointer == zeroPtr &&
!bsplit.ShouldEmbedBlockChanges(&rmd.data.Changes) {
return MdID{},
errors.New("MD has embedded block changes, but shouldn't")
}
err = encryptMDPrivateData(
ctx, j.codec, j.crypto, signer, ekg, j.uid, rmd)
if err != nil {
return MdID{}, err
}
err = rmd.bareMd.IsValidAndSigned(j.codec, j.crypto, extra)
if err != nil {
return MdID{}, err
}
id, err := j.putMD(rmd.bareMd)
if err != nil {
return MdID{}, err
}
err = j.putExtraMetadata(rmd.bareMd, extra)
if err != nil {
return MdID{}, err
}
if head != (ImmutableBareRootMetadata{}) &&
rmd.Revision() == head.RevisionNumber() {
j.log.CDebugf(
ctx, "Replacing head MD for TLF=%s with rev=%s bid=%s",
rmd.TlfID(), rmd.Revision(), rmd.BID())
// Don't try and preserve unknown fields from the old
// head here -- the new head is in general a different
// MD, so the unknown fields from the old head won't
// make sense.
err = j.j.replaceHead(mdIDJournalEntry{ID: id})
if err != nil {
return MdID{}, err
}
} else {
err = j.j.append(rmd.Revision(), mdIDJournalEntry{ID: id})
if err != nil {
return MdID{}, err
}
}
// Since the journal is now non-empty, clear lastMdID.
j.lastMdID = MdID{}
return id, nil
}
func (j *mdJournal) clear(
ctx context.Context, bid BranchID) (err error) {
j.log.CDebugf(ctx, "Clearing journal for branch %s", bid)
defer func() {
if err != nil {
j.deferLog.CDebugf(ctx,
"Clearing journal for branch %s failed with %v",
bid, err)
}
}()
if bid == NullBranchID {
return errors.New("Cannot clear master branch")
}
if j.branchID != bid {
// Nothing to do.
j.log.CDebugf(ctx, "Ignoring clear for branch %s while on branch %s",
bid, j.branchID)
return nil
}
head, err := j.getHead()
if err != nil {
return err
}
if head == (ImmutableBareRootMetadata{}) {
// The journal has been flushed but not cleared yet.
j.branchID = NullBranchID
return nil
}
if head.BID() != j.branchID {
return fmt.Errorf("Head branch ID %s doesn't match journal "+
"branch ID %s while clearing", head.BID(), j.branchID)
}
earliestRevision, err := j.j.readEarliestRevision()
if err != nil {
return err
}
latestRevision, err := j.j.readLatestRevision()
if err != nil {
return err
}
_, allEntries, err := j.j.getEntryRange(
earliestRevision, latestRevision)
if err != nil {
return err
}
j.branchID = NullBranchID
// No need to set lastMdID in this case.
err = j.j.clear()
if err != nil {
return err
}
// Garbage-collect the old branch entries. TODO: we'll eventually
// need a sweeper to clean up entries left behind if we crash
// here.
for _, entry := range allEntries {
err := j.removeMD(entry.ID)
if err != nil {
return err
}
}
return nil
}
func (j *mdJournal) resolveAndClear(
ctx context.Context, signer kbfscrypto.Signer, ekg encryptionKeyGetter,
bsplit BlockSplitter, mdcache MDCache, bid BranchID, rmd *RootMetadata) (
mdID MdID, err error) {
j.log.CDebugf(ctx, "Resolve and clear, branch %s, resolve rev %d",
bid, rmd.Revision())
defer func() {
if err != nil {
j.deferLog.CDebugf(ctx,
"Resolving journal for branch %s failed with %v",
bid, err)
}
}()
// The resolution must not have a branch ID.
if rmd.BID() != NullBranchID {
return MdID{}, fmt.Errorf("Resolution MD has branch ID: %s", rmd.BID())
}
// The branch ID must match our current state.
if bid == NullBranchID {
return MdID{}, errors.New("Cannot resolve master branch")
}
if j.branchID != bid {
return MdID{}, fmt.Errorf("Resolve and clear for branch %s "+
"while on branch %s", bid, j.branchID)
}
earliestRevision, err := j.j.readEarliestRevision()
if err != nil {
return MdID{}, err
}
latestRevision, err := j.j.readLatestRevision()
if err != nil {
return MdID{}, err
}
// First make a new journal to hold the block.
// Give this new journal a new ID journal.
idJournalTempDir, err := ioutil.TempDir(j.dir, "md_journal")
if err != nil {
return MdID{}, err
}
j.log.CDebugf(ctx, "Using temp dir %s for new IDs", idJournalTempDir)
otherIDJournal := makeMdIDJournal(j.codec, idJournalTempDir)
defer func() {
j.log.CDebugf(ctx, "Removing temp dir %s", idJournalTempDir)
removeErr := os.RemoveAll(idJournalTempDir)
if removeErr != nil {
j.log.CWarningf(ctx,
"Error when removing temp dir %s: %v",
idJournalTempDir, removeErr)
}
}()
otherJournal, err := makeMDJournalWithIDJournal(
j.uid, j.key, j.codec, j.crypto, j.clock, j.tlfID, j.mdVer, j.dir,
otherIDJournal, j.log)
if err != nil {
return MdID{}, err
}
//otherJournal.branchID = NullBranchID
mdID, err = otherJournal.put(ctx, signer, ekg, bsplit, rmd)
if err != nil {
return MdID{}, err
}
// Transform this journal into the new one.
// TODO: Do the below atomically on the filesystem
// level. Specifically, make "md_journal" always be a symlink,
// and then perform the swap by atomically changing the
// symlink to point to the new journal directory.
oldIDJournalTempDir := idJournalTempDir + ".old"
dir, err := j.j.move(oldIDJournalTempDir)
if err != nil {
return MdID{}, err
}
j.log.CDebugf(ctx, "Moved old journal from %s to %s",
dir, oldIDJournalTempDir)
otherIDJournalOldDir, err := otherJournal.j.move(dir)
if err != nil {
return MdID{}, err
}
// Set new journal to one with the new revision.
j.log.CDebugf(ctx, "Moved new journal from %s to %s",
otherIDJournalOldDir, dir)
*j, *otherJournal = *otherJournal, *j
// Transform the other journal into the old journal, so we can
// clear it out.
err = otherJournal.clear(ctx, bid)
if err != nil {
return MdID{}, err
}
// Make the defer above remove the old temp dir.
idJournalTempDir = oldIDJournalTempDir
// Delete all of the branch MDs from the md cache.
for rev := earliestRevision; rev <= latestRevision; rev++ {
mdcache.Delete(j.tlfID, rev, bid)
}
return mdID, nil
}
| 1 | 14,369 | Just noticed this nil context. Perhaps plumb through ctx too, or change to `Debug`? | keybase-kbfs | go |
@@ -22,7 +22,7 @@ module Selenium
class Common
MAX_REDIRECTS = 20 # same as chromium/gecko
CONTENT_TYPE = 'application/json'.freeze
- DEFAULT_HEADERS = {'Accept' => CONTENT_TYPE}.freeze
+ DEFAULT_HEADERS = {'Accept' => CONTENT_TYPE, 'Content-Type' => 'application/x-www-form-urlencoded'}.freeze
attr_accessor :timeout
attr_writer :server_url | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Remote
module Http
class Common
MAX_REDIRECTS = 20 # same as chromium/gecko
CONTENT_TYPE = 'application/json'.freeze
DEFAULT_HEADERS = {'Accept' => CONTENT_TYPE}.freeze
attr_accessor :timeout
attr_writer :server_url
def initialize
@timeout = nil
end
def quit_errors
[IOError]
end
def close
# hook for subclasses - will be called on Driver#quit
end
def call(verb, url, command_hash)
url = server_url.merge(url) unless url.is_a?(URI)
headers = DEFAULT_HEADERS.dup
headers['Cache-Control'] = 'no-cache' if verb == :get
if command_hash
payload = JSON.generate(command_hash)
headers['Content-Type'] = "#{CONTENT_TYPE}; charset=utf-8"
headers['Content-Length'] = payload.bytesize.to_s if [:post, :put].include?(verb)
WebDriver.logger.info(" >>> #{url} | #{payload}")
WebDriver.logger.debug(" > #{headers.inspect}")
elsif verb == :post
payload = '{}'
headers['Content-Length'] = '2'
end
request verb, url, headers, payload
end
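          # Illustrative example only (the endpoint path below is hypothetical
          # and just demonstrates the serialization above): a command_hash of
          # {'url' => 'http://example.com'} POSTed to session/<id>/url is sent
          # with Content-Type 'application/json; charset=utf-8' and the body
          # {"url":"http://example.com"}.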
private
def server_url
return @server_url if @server_url
raise Error::WebDriverError, 'server_url not set'
end
def request(*)
raise NotImplementedError, 'subclass responsibility'
end
def create_response(code, body, content_type)
code = code.to_i
body = body.to_s.strip
content_type = content_type.to_s
WebDriver.logger.info("<- #{body}")
if content_type.include? CONTENT_TYPE
raise Error::WebDriverError, "empty body: #{content_type.inspect} (#{code})\n#{body}" if body.empty?
Response.new(code, JSON.parse(body))
elsif code == 204
Response.new(code)
else
msg = "unexpected response, code=#{code}, content-type=#{content_type.inspect}"
msg << "\n#{body}" unless body.empty?
raise Error::WebDriverError, msg
end
end
end # Common
end # Http
end # Remote
end # WebDriver
end # Selenium
| 1 | 15,359 | Does it send requests with urlencoded bodies anywhere? I thought it sends only json. Maybe content-type should be `application/json` by default? | SeleniumHQ-selenium | java |
@@ -50,6 +50,11 @@ namespace eprosima {
namespace fastrtps {
namespace rtps {
+// Port used if the ROS environment variable doesn't specify one
+const uint16_t DEFAULT_ROS2_SERVER_PORT = 11811;
+// default server guidPrefix
+const char* DEFAULT_ROS2_SERVER_GUIDPREFIX = "44.49.53.43.53.45.52.56.45.52.5F.30";
+
GUID_t RemoteServerAttributes::GetParticipant() const
{
return GUID_t(guidPrefix, c_EntityId_RTPSParticipant); | 1 | // Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file PDPClient.cpp
*
*/
#include <fastdds/rtps/builtin/discovery/participant/PDPClient.h>
#include <fastdds/rtps/builtin/discovery/participant/PDPListener.h>
#include <fastdds/rtps/builtin/discovery/participant/timedevent/DSClientEvent.h>
#include <fastdds/rtps/builtin/discovery/endpoint/EDPClient.h>
#include <fastdds/rtps/builtin/BuiltinProtocols.h>
#include <fastdds/rtps/builtin/liveliness/WLP.h>
#include <fastdds/rtps/participant/RTPSParticipantListener.h>
#include <fastdds/rtps/reader/StatefulReader.h>
#include <fastdds/rtps/writer/StatefulWriter.h>
#include <fastdds/rtps/writer/ReaderProxy.h>
#include <fastdds/rtps/history/WriterHistory.h>
#include <fastdds/rtps/history/ReaderHistory.h>
#include <fastrtps/utils/TimeConversion.h>
#include <rtps/builtin/discovery/participant/DirectMessageSender.hpp>
#include <rtps/participant/RTPSParticipantImpl.h>
#include <fastdds/dds/log/Log.hpp>
using namespace eprosima::fastrtps;
namespace eprosima {
namespace fastrtps {
namespace rtps {
GUID_t RemoteServerAttributes::GetParticipant() const
{
return GUID_t(guidPrefix, c_EntityId_RTPSParticipant);
}
GUID_t RemoteServerAttributes::GetPDPReader() const
{
return GUID_t(guidPrefix, c_EntityId_SPDPReader);
}
GUID_t RemoteServerAttributes::GetPDPWriter() const
{
return GUID_t(guidPrefix, c_EntityId_SPDPWriter);
}
GUID_t RemoteServerAttributes::GetEDPPublicationsReader() const
{
return GUID_t(guidPrefix, c_EntityId_SEDPPubReader);
}
GUID_t RemoteServerAttributes::GetEDPSubscriptionsWriter() const
{
return GUID_t(guidPrefix, c_EntityId_SEDPSubWriter);
}
GUID_t RemoteServerAttributes::GetEDPPublicationsWriter() const
{
return GUID_t(guidPrefix, c_EntityId_SEDPPubWriter);
}
GUID_t RemoteServerAttributes::GetEDPSubscriptionsReader() const
{
return GUID_t(guidPrefix, c_EntityId_SEDPSubReader);
}
PDPClient::PDPClient(
BuiltinProtocols* builtin,
const RTPSParticipantAllocationAttributes& allocation)
: PDP(builtin, allocation)
, mp_sync(nullptr)
, _serverPing(false)
{
}
PDPClient::~PDPClient()
{
if (mp_sync != nullptr)
{
delete mp_sync;
}
}
void PDPClient::initializeParticipantProxyData(
ParticipantProxyData* participant_data)
{
PDP::initializeParticipantProxyData(participant_data); // TODO: Remember that the PDP version USES security
if (getRTPSParticipant()->getAttributes().builtin.discovery_config.discoveryProtocol != DiscoveryProtocol_t::CLIENT)
{
logError(RTPS_PDP, "Using a PDP client object with another user's settings");
}
if (getRTPSParticipant()->getAttributes().builtin.discovery_config.m_simpleEDP.
use_PublicationWriterANDSubscriptionReader)
{
participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_PUBLICATION_ANNOUNCER;
participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_DETECTOR;
}
if (getRTPSParticipant()->getAttributes().builtin.discovery_config.m_simpleEDP.
use_PublicationReaderANDSubscriptionWriter)
{
participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_PUBLICATION_DETECTOR;
participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_ANNOUNCER;
}
//#if HAVE_SECURITY
// if (getRTPSParticipant()->getAttributes().builtin.discovery_config.m_simpleEDP
// .enable_builtin_secure_publications_writer_and_subscriptions_reader)
// {
// participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_PUBLICATION_SECURE_ANNOUNCER;
// participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_SECURE_DETECTOR;
// }
//
// if (getRTPSParticipant()->getAttributes().builtin.discovery_config.m_simpleEDP
// .enable_builtin_secure_subscriptions_writer_and_publications_reader)
// {
// participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_SECURE_ANNOUNCER;
// participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_PUBLICATION_SECURE_DETECTOR;
// }
//#endif
}
bool PDPClient::init(
RTPSParticipantImpl* part)
{
if (!PDP::initPDP(part))
{
return false;
}
    /* We keep using EDPSimple even though its method EDPSimple::assignRemoteEndpoints regards
       all server EDPs as TRANSIENT_LOCAL, while server builtin Writers are actually TRANSIENT.
       Currently this mistake is not an issue, but it must be kept in mind if further development
       justifies the creation of an EDPClient class.
     */
mp_EDP = new EDPClient(this, mp_RTPSParticipant);
if (!mp_EDP->initEDP(m_discovery))
{
logError(RTPS_PDP, "Endpoint discovery configuration failed");
return false;
}
mp_sync =
new DSClientEvent(this, TimeConv::Duration_t2MilliSecondsDouble(
m_discovery.discovery_config.discoveryServer_client_syncperiod));
mp_sync->restart_timer();
return true;
}
ParticipantProxyData* PDPClient::createParticipantProxyData(
const ParticipantProxyData& participant_data,
const GUID_t&)
{
std::unique_lock<std::recursive_mutex> lock(*getMutex());
// Verify if this participant is a server
bool is_server = false;
for (auto& svr : mp_builtin->m_DiscoveryServers)
{
if (svr.guidPrefix == participant_data.m_guid.guidPrefix)
{
is_server = true;
}
}
ParticipantProxyData* pdata = add_participant_proxy_data(participant_data.m_guid, is_server);
if (pdata != nullptr)
{
pdata->copy(participant_data);
pdata->isAlive = true;
        // Clients only assert their servers' liveliness; other clients' liveliness is provided
        // through the server's PDP discovery data
if (is_server)
{
pdata->lease_duration_event->update_interval(pdata->m_leaseDuration);
pdata->lease_duration_event->restart_timer();
}
}
return pdata;
}
bool PDPClient::createPDPEndpoints()
{
logInfo(RTPS_PDP, "Beginning PDPClient Endpoints creation");
const NetworkFactory& network = mp_RTPSParticipant->network_factory();
HistoryAttributes hatt;
hatt.payloadMaxSize = mp_builtin->m_att.readerPayloadSize;
hatt.initialReservedCaches = pdp_initial_reserved_caches;
hatt.memoryPolicy = mp_builtin->m_att.readerHistoryMemoryPolicy;
mp_PDPReaderHistory = new ReaderHistory(hatt);
ReaderAttributes ratt;
ratt.expectsInlineQos = false;
ratt.endpoint.endpointKind = READER;
ratt.endpoint.multicastLocatorList = mp_builtin->m_metatrafficMulticastLocatorList;
ratt.endpoint.unicastLocatorList = mp_builtin->m_metatrafficUnicastLocatorList;
ratt.endpoint.topicKind = WITH_KEY;
ratt.endpoint.durabilityKind = TRANSIENT_LOCAL;
ratt.endpoint.reliabilityKind = RELIABLE;
ratt.times.heartbeatResponseDelay = pdp_heartbeat_response_delay;
mp_listener = new PDPListener(this);
if (mp_RTPSParticipant->createReader(&mp_PDPReader, ratt, mp_PDPReaderHistory, mp_listener,
c_EntityId_SPDPReader, true, false))
{
//#if HAVE_SECURITY
// mp_RTPSParticipant->set_endpoint_rtps_protection_supports(rout, false);
//#endif
// Initial peer list doesn't make sense in server scenario. Client should match its server list
for (const RemoteServerAttributes& it : mp_builtin->m_DiscoveryServers)
{
std::lock_guard<std::mutex> data_guard(temp_data_lock_);
temp_writer_data_.clear();
temp_writer_data_.guid(it.GetPDPWriter());
temp_writer_data_.set_multicast_locators(it.metatrafficMulticastLocatorList, network);
temp_writer_data_.set_remote_unicast_locators(it.metatrafficUnicastLocatorList, network);
temp_writer_data_.m_qos.m_durability.kind = TRANSIENT_DURABILITY_QOS; // Server Information must be persistent
temp_writer_data_.m_qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS;
mp_PDPReader->matched_writer_add(temp_writer_data_);
}
}
else
{
logError(RTPS_PDP, "PDPClient Reader creation failed");
delete(mp_PDPReaderHistory);
mp_PDPReaderHistory = nullptr;
delete(mp_listener);
mp_listener = nullptr;
return false;
}
hatt.payloadMaxSize = mp_builtin->m_att.writerPayloadSize;
hatt.initialReservedCaches = pdp_initial_reserved_caches;
hatt.memoryPolicy = mp_builtin->m_att.writerHistoryMemoryPolicy;
mp_PDPWriterHistory = new WriterHistory(hatt);
WriterAttributes watt;
watt.endpoint.endpointKind = WRITER;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
watt.endpoint.reliabilityKind = RELIABLE;
watt.endpoint.topicKind = WITH_KEY;
watt.endpoint.multicastLocatorList = mp_builtin->m_metatrafficMulticastLocatorList;
watt.endpoint.unicastLocatorList = mp_builtin->m_metatrafficUnicastLocatorList;
watt.times.heartbeatPeriod = pdp_heartbeat_period;
watt.times.nackResponseDelay = pdp_nack_response_delay;
watt.times.nackSupressionDuration = pdp_nack_supression_duration;
if (mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.bytesPerPeriod != UINT32_MAX &&
mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.periodMillisecs != 0)
{
watt.mode = ASYNCHRONOUS_WRITER;
}
if (mp_RTPSParticipant->createWriter(&mp_PDPWriter, watt, mp_PDPWriterHistory, nullptr,
c_EntityId_SPDPWriter, true))
{
//#if HAVE_SECURITY
// mp_RTPSParticipant->set_endpoint_rtps_protection_supports(wout, false);
//#endif
for (const RemoteServerAttributes& it : mp_builtin->m_DiscoveryServers)
{
std::lock_guard<std::mutex> data_guard(temp_data_lock_);
temp_reader_data_.clear();
temp_reader_data_.guid(it.GetPDPReader());
temp_reader_data_.set_multicast_locators(it.metatrafficMulticastLocatorList, network);
temp_reader_data_.set_remote_unicast_locators(it.metatrafficUnicastLocatorList, network);
temp_reader_data_.m_qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS;
temp_reader_data_.m_qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS;
mp_PDPWriter->matched_reader_add(temp_reader_data_);
}
}
else
{
logError(RTPS_PDP, "PDPClient Writer creation failed");
delete(mp_PDPWriterHistory);
mp_PDPWriterHistory = nullptr;
return false;
}
logInfo(RTPS_PDP, "PDPClient Endpoints creation finished");
return true;
}
// the ParticipantProxyData* pdata must be the one kept in PDP database
void PDPClient::assignRemoteEndpoints(
ParticipantProxyData* pdata)
{
{
std::unique_lock<std::recursive_mutex> lock(*getMutex());
// Verify if this participant is a server
for (auto& svr : mp_builtin->m_DiscoveryServers)
{
if (svr.guidPrefix == pdata->m_guid.guidPrefix)
{
svr.proxy = pdata;
}
}
}
notifyAboveRemoteEndpoints(*pdata);
}
void PDPClient::notifyAboveRemoteEndpoints(
const ParticipantProxyData& pdata)
{
    // No EDP notification needed. EDP endpoints will be matched when PDP synchronization is granted
if (mp_builtin->mp_WLP != nullptr)
{
mp_builtin->mp_WLP->assignRemoteEndpoints(pdata);
}
}
void PDPClient::removeRemoteEndpoints(
ParticipantProxyData* pdata)
{
    // EDP endpoints have already been unmatched by the associated listener
assert(!mp_EDP->areRemoteEndpointsMatched(pdata));
bool is_server = false;
{
std::unique_lock<std::recursive_mutex> lock(*getMutex());
// Verify if this participant is a server
for (auto& svr : mp_builtin->m_DiscoveryServers)
{
if (svr.guidPrefix == pdata->m_guid.guidPrefix)
{
                svr.proxy = nullptr; // reassign when we receive the server's DATA(p) again
is_server = true;
mp_sync->restart_timer(); // enable announcement and sync mechanism till this server reappears
}
}
}
if (is_server)
{
        // We should unmatch and rematch the PDP endpoints to renew the proxies associated with the PDP reader and writer
logInfo(RTPS_PDP, "For unmatching for server: " << pdata->m_guid);
const NetworkFactory& network = mp_RTPSParticipant->network_factory();
uint32_t endp = pdata->m_availableBuiltinEndpoints;
uint32_t auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_PARTICIPANT_ANNOUNCER;
if (auxendp != 0)
{
GUID_t wguid;
wguid.guidPrefix = pdata->m_guid.guidPrefix;
wguid.entityId = c_EntityId_SPDPWriter;
mp_PDPReader->matched_writer_remove(wguid);
            // Rematch, but discard any previous state of the server,
            // because we know the server shut down intentionally
std::lock_guard<std::mutex> data_guard(temp_data_lock_);
temp_writer_data_.clear();
temp_writer_data_.guid(wguid);
temp_writer_data_.persistence_guid(pdata->get_persistence_guid());
temp_writer_data_.set_persistence_entity_id(c_EntityId_SPDPWriter);
temp_writer_data_.set_remote_locators(pdata->metatraffic_locators, network, true);
temp_writer_data_.m_qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS;
temp_writer_data_.m_qos.m_durability.kind = TRANSIENT_DURABILITY_QOS;
mp_PDPReader->matched_writer_add(temp_writer_data_);
}
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_PARTICIPANT_DETECTOR;
if (auxendp != 0)
{
GUID_t rguid;
rguid.guidPrefix = pdata->m_guid.guidPrefix;
rguid.entityId = c_EntityId_SPDPReader;
mp_PDPWriter->matched_reader_remove(rguid);
std::lock_guard<std::mutex> data_guard(temp_data_lock_);
temp_reader_data_.clear();
temp_reader_data_.m_expectsInlineQos = false;
temp_reader_data_.guid(rguid);
temp_reader_data_.set_remote_locators(pdata->metatraffic_locators, network, true);
temp_reader_data_.m_qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS;
temp_reader_data_.m_qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS;
mp_PDPWriter->matched_reader_add(temp_reader_data_);
}
}
}
bool PDPClient::all_servers_acknowledge_PDP()
{
// check if already initialized
assert(mp_PDPWriterHistory && mp_PDPWriter);
// get a reference to client proxy data
CacheChange_t* pPD;
if (mp_PDPWriterHistory->get_min_change(&pPD))
{
return mp_PDPWriter->is_acked_by_all(pPD);
}
else
{
logError(RTPS_PDP, "ParticipantProxy data should have been added to client PDP history cache "
"by a previous call to announceParticipantState()");
}
return false;
}
bool PDPClient::is_all_servers_PDPdata_updated()
{
    // Check that all server DATA has been received
StatefulReader* pR = dynamic_cast<StatefulReader*>(mp_PDPReader);
assert(pR);
return pR->isInCleanState();
}
void PDPClient::announceParticipantState(
bool new_change,
bool dispose,
WriteParams& )
{
/*
       Protect the writer sequence number. To prevent an AB-BA deadlock, make sure the
       writer mutex is systematically locked before the PDP one (if needed):
- transport callbacks on PDPListener
- initialization and removal on BuiltinProtocols::initBuiltinProtocols and ~BuiltinProtocols
- DSClientEvent (own thread)
- ResendParticipantProxyDataPeriod (participant event thread)
*/
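    // In sketch form, any path that needs both mutexes must acquire them as
    //   1) mp_PDPWriter->getMutex()   (writer mutex first)
    //   2) *getMutex()                (PDP mutex second, only if needed)
    // and never in the reverse order.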
std::lock_guard<RecursiveTimedMutex> wlock(mp_PDPWriter->getMutex());
WriteParams wp;
SampleIdentity local;
local.writer_guid(mp_PDPWriter->getGuid());
local.sequence_number(mp_PDPWriterHistory->next_sequence_number());
wp.sample_identity(local);
wp.related_sample_identity(local);
// Add the write params to the sample
if (dispose)
{
        // We must ensure that, when the server is dying, every client is sent at least a DATA(p).
        // Note that at this point we can no longer receive any DATA or ACKNACK from clients.
        // To work around that, we send the message directly, as in the standard stateless PDP.
StatefulWriter* pW = dynamic_cast<StatefulWriter*>(mp_PDPWriter);
assert(pW);
CacheChange_t* change = nullptr;
if ((change = pW->new_change(
[this]() -> uint32_t
{
return mp_builtin->m_att.writerPayloadSize;
},
NOT_ALIVE_DISPOSED_UNREGISTERED, getLocalParticipantProxyData()->m_key)))
{
// update the sequence number
change->sequenceNumber = mp_PDPWriterHistory->next_sequence_number();
change->write_params = wp;
std::vector<GUID_t> remote_readers;
LocatorList_t locators;
// TODO: modify announcement mechanism to allow direct message sending
//for (auto it = pW->matchedReadersBegin(); it != pW->matchedReadersEnd(); ++it)
//{
// RemoteReaderAttributes & att = (*it)->m_att;
// remote_readers.push_back(att.guid);
// EndpointAttributes & ep = att.endpoint;
// locators.push_back(ep.unicastLocatorList);
// //locators.push_back(ep.multicastLocatorList);
//}
{
// temporary workaround
std::lock_guard<std::recursive_mutex> lock(*getMutex());
for (auto& svr : mp_builtin->m_DiscoveryServers)
{
                    // if we are matched to a server, report our demise
if (svr.proxy != nullptr)
{
remote_readers.push_back(svr.GetPDPReader());
//locators.push_back(svr.metatrafficMulticastLocatorList);
locators.push_back(svr.metatrafficUnicastLocatorList);
}
}
}
DirectMessageSender sender(getRTPSParticipant(), &remote_readers, &locators);
RTPSMessageGroup group(getRTPSParticipant(), mp_PDPWriter, sender);
if (!group.add_data(*change, false))
{
logError(RTPS_PDP, "Error sending announcement from client to servers");
}
}
// free change
mp_PDPWriterHistory->release_Cache(change);
}
else
{
PDP::announceParticipantState(new_change, dispose, wp);
if (!new_change)
{
// retrieve the participant discovery data
CacheChange_t* pPD;
if (mp_PDPWriterHistory->get_min_change(&pPD))
{
std::lock_guard<std::recursive_mutex> lock(*getMutex());
std::vector<GUID_t> remote_readers;
LocatorList_t locators;
for (auto& svr : mp_builtin->m_DiscoveryServers)
{
// non-pinging announcements like lease duration ones must be
// broadcast to all servers
if (svr.proxy == nullptr || !_serverPing)
{
remote_readers.push_back(svr.GetPDPReader());
locators.push_back(svr.metatrafficMulticastLocatorList);
locators.push_back(svr.metatrafficUnicastLocatorList);
}
}
DirectMessageSender sender(getRTPSParticipant(), &remote_readers, &locators);
RTPSMessageGroup group(getRTPSParticipant(), mp_PDPWriter, sender);
if (!group.add_data(*pPD, false))
{
logError(RTPS_PDP, "Error sending announcement from client to servers");
}
                // The ping is done independently of what triggered the announcement.
                // Note that all event callbacks are currently serialized.
_serverPing = false;
}
else
{
logError(RTPS_PDP, "ParticipantProxy data should have been added to client PDP history "
"cache by a previous call to announceParticipantState()");
}
}
}
}
bool PDPClient::match_servers_EDP_endpoints()
{
    // PDP must have been initialized
assert(mp_EDP);
std::lock_guard<std::recursive_mutex> lock(*getMutex());
bool all = true; // have all servers been discovered?
for (auto& svr : mp_builtin->m_DiscoveryServers)
{
all &= (svr.proxy != nullptr);
if (svr.proxy && !mp_EDP->areRemoteEndpointsMatched(svr.proxy))
{
mp_EDP->assignRemoteEndpoints(*svr.proxy);
}
}
return all;
}
} /* namespace rtps */
} /* namespace fastrtps */
} /* namespace eprosima */
| 1 | 18,860 | Again use of `constexpr` is recomended. | eProsima-Fast-DDS | cpp |
@@ -8,6 +8,12 @@ from ..core.operation import Operation
from .chart import Points
from .path import Path
+try:
+ from datashader.layout import LayoutAlgorithm as ds_layout
+except:
+ ds_layout = None
+
+
class graph_redim(redim):
"""
Extension for the redim utility that allows re-dimensioning | 1 | import param
import numpy as np
from ..core import Dimension, Dataset, Element2D
from ..core.dimension import redim
from ..core.util import max_range
from ..core.operation import Operation
from .chart import Points
from .path import Path
class graph_redim(redim):
"""
Extension for the redim utility that allows re-dimensioning
Graph objects including their nodes and edgepaths.
"""
def __call__(self, specs=None, **dimensions):
redimmed = super(graph_redim, self).__call__(specs, **dimensions)
new_data = (redimmed.data,)
if self.parent.nodes:
new_data = new_data + (self.parent.nodes.redim(specs, **dimensions),)
if self.parent._edgepaths:
new_data = new_data + (self.parent.edgepaths.redim(specs, **dimensions),)
return redimmed.clone(new_data)
def circular_layout(nodes):
N = len(nodes)
circ = np.pi/N*np.arange(N)*2
x = np.cos(circ)
y = np.sin(circ)
return (x, y, nodes)
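# A small worked example of ``circular_layout`` (values rounded): for four
# nodes the angles come out as 0, pi/2, pi and 3*pi/2, giving positions
# (1, 0), (0, 1), (-1, 0) and (0, -1), returned alongside the node values.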
class layout_nodes(Operation):
"""
Accepts a Graph and lays out the corresponding nodes with the
supplied networkx layout function. If no layout function is
    supplied, a simple circular_layout function is used.
"""
layout = param.Callable(default=None, doc="""
A NetworkX layout function""")
def _process(self, element, key=None):
        if self.p.layout:
            import networkx as nx  # networkx is only needed when an explicit layout is supplied
            graph = nx.from_edgelist(element.array([0, 1]))
positions = self.p.layout(graph)
return Nodes([tuple(pos)+(idx,) for idx, pos in sorted(positions.items())])
else:
source = element.dimension_values(0, expanded=False)
target = element.dimension_values(1, expanded=False)
nodes = np.unique(np.concatenate([source, target]))
return Nodes(circular_layout(nodes))
class Graph(Dataset, Element2D):
"""
    Graph is a high-level Element representing both nodes and edges.
A Graph may be defined in an abstract form representing just
the abstract edges between nodes and optionally may be made
concrete by supplying a Nodes Element defining the concrete
positions of each node. If the node positions are supplied
the EdgePaths (defining the concrete edges) can be inferred
automatically or supplied explicitly.
The constructor accepts regular columnar data defining the edges
or a tuple of the abstract edges and nodes, or a tuple of the
abstract edges, nodes, and edgepaths.
"""
group = param.String(default='Graph', constant=True)
kdims = param.List(default=[Dimension('start'), Dimension('end')],
bounds=(2, 2))
def __init__(self, data, **params):
if isinstance(data, tuple):
data = data + (None,)* (3-len(data))
edges, nodes, edgepaths = data
else:
edges, nodes, edgepaths = data, None, None
if nodes is not None:
node_info = None
if isinstance(nodes, Nodes):
pass
elif not isinstance(nodes, Dataset) or nodes.ndims == 3:
nodes = Nodes(nodes)
else:
node_info = nodes
nodes = None
else:
node_info = None
if edgepaths is not None and not isinstance(edgepaths, EdgePaths):
edgepaths = EdgePaths(edgepaths)
self._nodes = nodes
self._edgepaths = edgepaths
super(Graph, self).__init__(edges, **params)
if self._nodes is None and node_info:
nodes = self.nodes.clone(datatype=['pandas', 'dictionary'])
for d in node_info.dimensions():
nodes = nodes.add_dimension(d, len(nodes.vdims),
node_info.dimension_values(d),
vdim=True)
self._nodes = nodes
if self._edgepaths:
mismatch = []
for kd1, kd2 in zip(self.nodes.kdims, self.edgepaths.kdims):
if kd1 != kd2:
mismatch.append('%s != %s' % (kd1, kd2))
if mismatch:
raise ValueError('Ensure that the first two key dimensions on '
'Nodes and EdgePaths match: %s' % ', '.join(mismatch))
self.redim = graph_redim(self, mode='dataset')
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):
if data is None:
data = (self.data, self.nodes)
if self._edgepaths:
data = data + (self.edgepaths,)
elif not isinstance(data, tuple):
data = (data, self.nodes)
if self._edgepaths:
data = data + (self.edgepaths,)
return super(Graph, self).clone(data, shared_data, new_type, *args, **overrides)
def select(self, selection_specs=None, **selection):
"""
Allows selecting data by the slices, sets and scalar values
along a particular dimension. The indices should be supplied as
keywords mapping between the selected dimension and
value. Additionally selection_specs (taking the form of a list
of type.group.label strings, types or functions) may be
supplied, which will ensure the selection is only applied if the
specs match the selected object.
"""
selection = {dim: sel for dim, sel in selection.items()
if dim in self.dimensions('ranges')+['selection_mask']}
if (selection_specs and not any(self.matches(sp) for sp in selection_specs)
or not selection):
return self
index_dim = self.nodes.kdims[2].name
dimensions = self.kdims+self.vdims
node_selection = {index_dim: v for k, v in selection.items()
if k in self.kdims}
nodes = self.nodes.select(**dict(selection, **node_selection))
selection = {k: v for k, v in selection.items() if k in dimensions}
if len(nodes) != len(self.nodes):
xdim, ydim = dimensions[:2]
indices = list(nodes.dimension_values(2))
selection[xdim.name] = indices
selection[ydim.name] = indices
if selection:
mask = self.interface.select_mask(self, selection)
data = self.interface.select(self, mask)
if not np.all(mask):
new_graph = self.clone((data, nodes))
source = new_graph.dimension_values(0, expanded=False)
target = new_graph.dimension_values(1, expanded=False)
unique_nodes = np.unique(np.concatenate([source, target]))
nodes = new_graph.nodes[:, :, list(unique_nodes)]
paths = None
if self._edgepaths:
paths = self.edgepaths.interface.select_paths(self.edgepaths, mask)
else:
data = self.data
paths = self._edgepaths
return self.clone((data, nodes, paths))
def range(self, dimension, data_range=True):
if self.nodes and dimension in self.nodes.dimensions():
node_range = self.nodes.range(dimension, data_range)
if self._edgepaths:
path_range = self._edgepaths.range(dimension, data_range)
return max_range([node_range, path_range])
return node_range
return super(Graph, self).range(dimension, data_range)
def dimensions(self, selection='all', label=False):
dimensions = super(Graph, self).dimensions(selection, label)
if selection == 'ranges':
if self._nodes:
node_dims = self.nodes.dimensions(selection, label)
else:
node_dims = Nodes.kdims+Nodes.vdims
if label in ['name', True, 'short']:
node_dims = [d.name for d in node_dims]
elif label in ['long', 'label']:
node_dims = [d.label for d in node_dims]
return dimensions+node_dims
return dimensions
@property
def nodes(self):
"""
Computes the node positions the first time they are requested
if no explicit node information was supplied.
"""
if self._nodes is None:
self._nodes = layout_nodes(self)
return self._nodes
@property
def edgepaths(self):
"""
Returns the fixed EdgePaths or computes direct connections
between supplied nodes.
"""
if self._edgepaths:
return self._edgepaths
paths = []
for start, end in self.array(self.kdims):
start_ds = self.nodes[:, :, start]
end_ds = self.nodes[:, :, end]
sx, sy = start_ds.array(start_ds.kdims[:2]).T
ex, ey = end_ds.array(end_ds.kdims[:2]).T
paths.append([(sx[0], sy[0]), (ex[0], ey[0])])
return EdgePaths(paths, kdims=self.nodes.kdims[:2])
@classmethod
def from_networkx(cls, G, layout_function, nodes=None, **kwargs):
"""
Generate a HoloViews Graph from a networkx.Graph object and
networkx layout function. Any keyword arguments will be passed
to the layout function.
"""
positions = layout_function(G, **kwargs)
if nodes:
xs, ys = zip(*[v for k, v in sorted(positions.items())])
nodes = nodes.add_dimension('x', 0, xs)
nodes = nodes.add_dimension('y', 1, ys).clone(new_type=Nodes)
else:
nodes = Nodes([tuple(pos)+(idx,) for idx, pos in sorted(positions.items())])
return cls((G.edges(), nodes))
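# Editorial sketch, not part of the original module: a minimal illustration of
# the two construction paths described in the Graph docstring above. It assumes
# networkx is installed; the helper exists purely for documentation and is
# never called at import time.
def _example_graph_usage():
    import networkx as nx  # assumed optional dependency for this example
    edges = [(0, 1), (0, 2), (1, 2)]
    abstract = Graph(edges)  # node positions are computed lazily on access
    concrete = Graph.from_networkx(nx.karate_club_graph(), nx.circular_layout)
    return abstract, concrete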
class Nodes(Points):
"""
Nodes is a simple Element representing Graph nodes as a set of
Points. Unlike regular Points, Nodes must define a third key
dimension corresponding to the node index.
"""
kdims = param.List(default=[Dimension('x'), Dimension('y'),
Dimension('index')], bounds=(3, 3))
group = param.String(default='Nodes', constant=True)
class EdgePaths(Path):
"""
EdgePaths is a simple Element representing the paths of edges
connecting nodes in a graph.
"""
group = param.String(default='EdgePaths', constant=True)
| 1 | 18,746 | Minor point but I would call this ``redim_graph`` instead. | holoviz-holoviews | py |
@@ -26,12 +26,14 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/klog"
+ "k8s.io/utils/pointer"
infrav1 "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb"
+ "sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
+ "sigs.k8s.io/cluster-api/pkg/controller/noderefutil"
capierrors "sigs.k8s.io/cluster-api/pkg/errors"
"sigs.k8s.io/cluster-api/pkg/util"
"sigs.k8s.io/controller-runtime/pkg/client" | 1 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package awsmachine
import (
"context"
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog"
infrav1 "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/infrastructure/v1alpha2"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2"
capierrors "sigs.k8s.io/cluster-api/pkg/errors"
"sigs.k8s.io/cluster-api/pkg/util"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
waitForClusterInfrastructureReadyDuration = 15 * time.Second //nolint
waitForControlPlaneMachineExistenceDuration = 5 * time.Second //nolint
waitForControlPlaneReadyDuration = 5 * time.Second //nolint
)
// Add creates a new AWSMachine Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) *ReconcileAWSMachine {
return &ReconcileAWSMachine{
Client: mgr.GetClient(),
scheme: mgr.GetScheme(),
}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
// Create a new controller
c, err := controller.New("awsmachine-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
// Watch for changes to AWSMachine
err = c.Watch(
&source.Kind{Type: &infrav1.AWSMachine{}},
&handler.EnqueueRequestForObject{},
)
if err != nil {
return err
}
return c.Watch(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: util.MachineToInfrastructureMapFunc(schema.GroupVersionKind{
Group: infrav1.SchemeGroupVersion.Group,
Version: infrav1.SchemeGroupVersion.Version,
Kind: "AWSMachine",
}),
},
)
}
var _ reconcile.Reconciler = &ReconcileAWSMachine{}
// ReconcileAWSMachine reconciles a AWSMachine object
type ReconcileAWSMachine struct {
client.Client
scheme *runtime.Scheme
}
// Reconcile reads that state of the cluster for a AWSMachine object and makes changes based on the state read
// and what is in the AWSMachine.Spec
func (r *ReconcileAWSMachine) Reconcile(request reconcile.Request) (reconcile.Result, error) {
ctx := context.Background()
// Fetch the AWSMachine instance.
awsm := &infrav1.AWSMachine{}
err := r.Get(ctx, request.NamespacedName, awsm)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// If the Machine hasn't been deleted and doesn't have a finalizer, add one.
if awsm.ObjectMeta.DeletionTimestamp.IsZero() {
if !util.Contains(awsm.Finalizers, clusterv1.MachineFinalizer) {
awsm.Finalizers = append(awsm.ObjectMeta.Finalizers, clusterv1.MachineFinalizer)
}
}
// Create the scope
scope, err := scope.NewMachineScope(scope.MachineScopeParams{
ProviderMachine: awsm,
Client: r.Client,
})
if err != nil {
if requeueErr, ok := errors.Cause(err).(capierrors.HasRequeueAfterError); ok {
return reconcile.Result{RequeueAfter: requeueErr.GetRequeueAfter()}, nil
}
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
defer scope.Close()
// Make sure bootstrap data is available and populated.
if scope.Machine.Spec.Bootstrap.Data == nil || *scope.Machine.Spec.Bootstrap.Data == "" {
klog.Infof("Waiting for bootstrap data to be available on AWSMachine %q/%q", awsm.Namespace, awsm.Name)
return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
}
// Call the internal reconciler.
if err := r.reconcile(ctx, scope); err != nil {
if requeueErr, ok := errors.Cause(err).(capierrors.HasRequeueAfterError); ok {
klog.Infof("Reconciliation for AWSMachine %q in namespace %q asked to requeue: %v", awsm.Name, awsm.Namespace, err)
return reconcile.Result{Requeue: true, RequeueAfter: requeueErr.GetRequeueAfter()}, nil
}
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
func (r *ReconcileAWSMachine) reconcile(ctx context.Context, scope *scope.MachineScope) error {
exist, err := r.exists(scope)
if err != nil {
klog.Errorf("Failed to check if Machine %q infrastructure exists: %v", scope.Name(), err)
return err
}
// Reconcile ProviderID.
if pid := scope.GetProviderID(); pid == nil || *pid == "" {
scope.SetProviderID(fmt.Sprintf("aws:////%s", *scope.GetInstanceID()))
}
if exist {
scope.Info("Reconciling Machine triggers idempotent update")
return r.update(scope)
}
// Machine resource created. Machine does not yet exist.
scope.Info("Reconciling Machine triggers idempotent create")
return r.create(scope)
}
// create creates a machine and is invoked by the machine controller.
func (r *ReconcileAWSMachine) create(scope *scope.MachineScope) error {
if scope.Parent.Cluster.Annotations[infrav1.AnnotationClusterInfrastructureReady] != infrav1.ValueReady {
scope.Info("Cluster infrastructure is not ready yet - requeuing machine")
return &capierrors.RequeueAfterError{RequeueAfter: waitForClusterInfrastructureReadyDuration}
}
ec2svc := ec2.NewService(scope.Parent)
scope.Info("Retrieving machines for cluster")
machineList := &clusterv1.MachineList{}
if err := r.List(context.Background(), machineList, scope.Parent.ListOptionsLabelSelector()); err != nil {
return errors.Wrapf(err, "failed to retrieve machines in cluster %q", scope.Parent.Name())
}
controlPlaneMachines := util.GetControlPlaneMachinesFromList(machineList)
if len(controlPlaneMachines) == 0 {
scope.Info("No control plane machines exist yet - requeuing")
return &capierrors.RequeueAfterError{RequeueAfter: waitForControlPlaneMachineExistenceDuration}
}
// Create the Machine.
i, err := ec2svc.CreateOrGetMachine(scope)
if err != nil {
return errors.Errorf("failed to create or get machine: %+v", err)
}
scope.SetInstanceID(i.ID)
scope.SetInstanceState(i.State)
scope.SetAnnotation("cluster-api-provider-aws", "true")
if err := r.reconcileLBAttachment(scope, i); err != nil {
return errors.Errorf("failed to reconcile LB attachment: %+v", err)
}
scope.Info("Create completed")
return nil
}
func (r *ReconcileAWSMachine) exists(scope *scope.MachineScope) (bool, error) {
ec2svc := ec2.NewService(scope.Parent)
	// TODO: worry about pointers. InstanceIfExists returns *any* instance
if scope.GetInstanceID() == nil {
return false, nil
}
instance, err := ec2svc.InstanceIfExists(scope.GetInstanceID())
if err != nil {
return false, errors.Errorf("failed to retrieve instance: %+v", err)
}
if instance == nil {
return false, nil
}
scope.Info("Found instance for machine", "instance", instance)
switch instance.State {
case infrav1.InstanceStateRunning:
scope.Info("Machine instance is running", "instance-id", *scope.GetInstanceID())
case infrav1.InstanceStatePending:
scope.Info("Machine instance is pending", "instance-id", *scope.GetInstanceID())
default:
return false, nil
}
scope.SetInstanceState(instance.State)
if err := r.reconcileLBAttachment(scope, instance); err != nil {
return true, err
}
return true, nil
}
func (r *ReconcileAWSMachine) update(scope *scope.MachineScope) error {
ec2svc := ec2.NewService(scope.Parent)
// Get the current instance description from AWS.
instanceDescription, err := ec2svc.InstanceIfExists(scope.GetInstanceID())
if err != nil {
return errors.Errorf("failed to get instance: %+v", err)
}
// We can now compare the various AWS state to the state we were passed.
// We will check immutable state first, in order to fail quickly before
// moving on to state that we can mutate.
if errs := r.isMachineOutdated(&scope.ProviderMachine.Spec, instanceDescription); len(errs) > 0 {
return errors.Errorf("found attempt to change immutable state for machine %q: %+q", scope.Name(), errs)
}
existingSecurityGroups, err := ec2svc.GetInstanceSecurityGroups(*scope.GetInstanceID())
if err != nil {
return err
}
// Ensure that the security groups are correct.
_, err = r.ensureSecurityGroups(
ec2svc,
scope,
scope.ProviderMachine.Spec.AdditionalSecurityGroups,
existingSecurityGroups,
)
if err != nil {
return errors.Errorf("failed to apply security groups: %+v", err)
}
// Ensure that the tags are correct.
_, err = r.ensureTags(
ec2svc,
scope.ProviderMachine,
scope.GetInstanceID(),
scope.ProviderMachine.Spec.AdditionalTags,
)
if err != nil {
return errors.Errorf("failed to ensure tags: %+v", err)
}
return nil
}
func (r *ReconcileAWSMachine) reconcileLBAttachment(scope *scope.MachineScope, i *infrav1.Instance) error {
if !scope.IsControlPlane() {
return nil
}
elbsvc := elb.NewService(scope.Parent)
if err := elbsvc.RegisterInstanceWithAPIServerELB(i.ID); err != nil {
return errors.Wrapf(err, "could not register control plane instance %q with load balancer", i.ID)
}
return nil
}
// isMachineOutdated checks that no immutable fields have been updated in an
// Update request.
// Returns a slice of errors representing attempts to change immutable state
func (r *ReconcileAWSMachine) isMachineOutdated(spec *infrav1.AWSMachineSpec, i *infrav1.Instance) (errs []error) {
// Instance Type
if spec.InstanceType != i.Type {
errs = append(errs, errors.Errorf("instance type cannot be mutated from %q to %q", i.Type, spec.InstanceType))
}
// IAM Profile
if spec.IAMInstanceProfile != i.IAMProfile {
errs = append(errs, errors.Errorf("instance IAM profile cannot be mutated from %q to %q", i.IAMProfile, spec.IAMInstanceProfile))
}
// SSH Key Name
if spec.KeyName != aws.StringValue(i.KeyName) {
errs = append(errs, errors.Errorf("SSH key name cannot be mutated from %q to %q", aws.StringValue(i.KeyName), spec.KeyName))
}
// Root Device Size
if spec.RootDeviceSize > 0 && spec.RootDeviceSize != i.RootDeviceSize {
errs = append(errs, errors.Errorf("Root volume size cannot be mutated from %v to %v", i.RootDeviceSize, spec.RootDeviceSize))
}
// Subnet ID
// spec.Subnet is a *AWSResourceReference and could technically be
// a *string, ARN or Filter. However, elsewhere in the code it is only used
// as a *string, so do the same here.
if spec.Subnet != nil {
if aws.StringValue(spec.Subnet.ID) != i.SubnetID {
errs = append(errs, errors.Errorf("machine subnet ID cannot be mutated from %q to %q",
i.SubnetID, aws.StringValue(spec.Subnet.ID)))
}
}
// PublicIP check is a little more complicated as the machineConfig is a
// simple bool indicating if the instance should have a public IP or not,
// while the instanceDescription contains the public IP assigned to the
// instance.
// Work out whether the instance already has a public IP or not based on
// the length of the PublicIP string. Anything >0 is assumed to mean it does
// have a public IP.
instanceHasPublicIP := false
if len(aws.StringValue(i.PublicIP)) > 0 {
instanceHasPublicIP = true
}
if aws.BoolValue(spec.PublicIP) != instanceHasPublicIP {
errs = append(errs, errors.Errorf(`public IP setting cannot be mutated from "%v" to "%v"`,
instanceHasPublicIP, aws.BoolValue(spec.PublicIP)))
}
return errs
}
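// exampleImmutableDriftCheck is an editorial sketch, not part of the original
// controller: it illustrates how isMachineOutdated reports drift on an
// immutable field. The instance-type values are made up for the example and
// the function is never called by the reconciler.
func exampleImmutableDriftCheck(r *ReconcileAWSMachine) []error {
	spec := &infrav1.AWSMachineSpec{InstanceType: "m5.large"}
	instance := &infrav1.Instance{Type: "t3.medium"}
	// Only the instance type differs from the spec, so exactly one error comes back.
	return r.isMachineOutdated(spec, instance)
}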
| 1 | 10,104 | As I was trying to figure out how the error messages are written out, since they are returned from `getOrCreate` and `reconcile`, it looks like we are using plain `klog` to write them out instead of using the logger from the scope above | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -40,15 +40,11 @@ func (service *servicePFCtl) Add(rule RuleForwarding) {
func (service *servicePFCtl) Start() error {
err := service.ipForward.Enable()
if err != nil {
- return err
+ log.Warn(natLogPrefix, "Failed to enable IP forwarding: ", err)
}
service.clearStaleRules()
- err = service.enableRules()
- if err != nil {
- return err
- }
- return nil
+ return service.enableRules()
}
func (service *servicePFCtl) Stop() { | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package nat
import (
"errors"
"fmt"
"net"
"os/exec"
"strings"
log "github.com/cihub/seelog"
"github.com/mysteriumnetwork/node/utils"
)
type servicePFCtl struct {
rules []RuleForwarding
ipForward serviceIPForward
}
func (service *servicePFCtl) Add(rule RuleForwarding) {
service.rules = append(service.rules, rule)
}
func (service *servicePFCtl) Start() error {
err := service.ipForward.Enable()
if err != nil {
return err
}
service.clearStaleRules()
err = service.enableRules()
if err != nil {
return err
}
return nil
}
func (service *servicePFCtl) Stop() {
service.disableRules()
service.ipForward.Disable()
}
func ifaceByAddress(ipAddress string) (string, error) {
ifaces, err := net.Interfaces()
if err != nil {
return "", err
}
for _, ifi := range ifaces {
addresses, err := ifi.Addrs()
if err != nil {
return "", err
}
for _, address := range addresses {
if address.(*net.IPNet).IP.String() == ipAddress {
return ifi.Name, nil
}
}
}
return "", errors.New("not able to determine outbound ethernet interface")
}
func (service *servicePFCtl) enableRules() error {
for _, rule := range service.rules {
iface, err := ifaceByAddress(rule.TargetIP)
if err != nil {
return err
}
natRule := fmt.Sprintf("nat on %v inet from %v to any -> %v", iface, rule.SourceAddress, rule.TargetIP)
arguments := fmt.Sprintf(`echo "%v" | /sbin/pfctl -vEf -`, natRule)
cmd := exec.Command(
"sh",
"-c",
arguments,
)
if output, err := cmd.CombinedOutput(); err != nil {
if !strings.Contains(string(output), natRule) {
log.Warn("Failed to create pfctl rule: ", cmd.Args, " Returned exit error: ", err.Error(), " Cmd output: ", string(output))
return err
}
}
log.Info(natLogPrefix, "NAT rule from '", rule.SourceAddress, "' to IP: ", rule.TargetIP, " added")
}
return nil
}
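// examplePFCtlRule is an editorial sketch, not part of the original service:
// it spells out the rule string that enableRules pipes into pfctl for a single
// forwarding rule. The addresses and interface name are illustrative only.
func examplePFCtlRule() string {
	rule := RuleForwarding{SourceAddress: "10.8.0.0/24", TargetIP: "192.168.1.5"}
	iface := "en0" // what ifaceByAddress might resolve for the target IP
	// Produces: nat on en0 inet from 10.8.0.0/24 to any -> 192.168.1.5
	return fmt.Sprintf("nat on %v inet from %v to any -> %v", iface, rule.SourceAddress, rule.TargetIP)
}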
func (service *servicePFCtl) disableRules() {
cmd := utils.SplitCommand("/sbin/pfctl", "-F nat")
if output, err := cmd.CombinedOutput(); err != nil {
log.Warn("Failed cleanup pfctl rules: ", cmd.Args, " Returned exit error: ", err.Error(), " Cmd output: ", string(output))
}
log.Info(natLogPrefix, "NAT rules cleared")
}
func (service *servicePFCtl) clearStaleRules() {
service.disableRules()
}
| 1 | 12,010 | Now it's double logging, is not it? Because `service.ipForward.Enable()` logs extra `warning` | mysteriumnetwork-node | go |
@@ -256,6 +256,9 @@ OpenStreetMap::Application.routes.draw do
# directions
get "/directions" => "directions#search"
+ # set home location
+ match "/set_home_loc" => "user#set_home_location", :via => [:get, :post]
+
# export
post "/export/finish" => "export#finish"
get "/export/embed" => "export#embed" | 1 | OpenStreetMap::Application.routes.draw do
# API
get "api/capabilities" => "api#capabilities"
scope "api/0.6" do
get "capabilities" => "api#capabilities"
get "permissions" => "api#permissions"
put "changeset/create" => "changeset#create"
post "changeset/:id/upload" => "changeset#upload", :id => /\d+/
get "changeset/:id/download" => "changeset#download", :as => :changeset_download, :id => /\d+/
post "changeset/:id/expand_bbox" => "changeset#expand_bbox", :id => /\d+/
get "changeset/:id" => "changeset#read", :as => :changeset_read, :id => /\d+/
post "changeset/:id/subscribe" => "changeset#subscribe", :as => :changeset_subscribe, :id => /\d+/
post "changeset/:id/unsubscribe" => "changeset#unsubscribe", :as => :changeset_unsubscribe, :id => /\d+/
put "changeset/:id" => "changeset#update", :id => /\d+/
put "changeset/:id/close" => "changeset#close", :id => /\d+/
get "changesets" => "changeset#query"
post "changeset/:id/comment" => "changeset#comment", :as => :changeset_comment, :id => /\d+/
post "changeset/comment/:id/hide" => "changeset#hide_comment", :as => :changeset_comment_hide, :id => /\d+/
post "changeset/comment/:id/unhide" => "changeset#unhide_comment", :as => :changeset_comment_unhide, :id => /\d+/
put "node/create" => "node#create"
get "node/:id/ways" => "way#ways_for_node", :id => /\d+/
get "node/:id/relations" => "relation#relations_for_node", :id => /\d+/
get "node/:id/history" => "old_node#history", :id => /\d+/
post "node/:id/:version/redact" => "old_node#redact", :version => /\d+/, :id => /\d+/
get "node/:id/:version" => "old_node#version", :id => /\d+/, :version => /\d+/
get "node/:id" => "node#read", :id => /\d+/
put "node/:id" => "node#update", :id => /\d+/
delete "node/:id" => "node#delete", :id => /\d+/
get "nodes" => "node#nodes"
put "way/create" => "way#create"
get "way/:id/history" => "old_way#history", :id => /\d+/
get "way/:id/full" => "way#full", :id => /\d+/
get "way/:id/relations" => "relation#relations_for_way", :id => /\d+/
post "way/:id/:version/redact" => "old_way#redact", :version => /\d+/, :id => /\d+/
get "way/:id/:version" => "old_way#version", :id => /\d+/, :version => /\d+/
get "way/:id" => "way#read", :id => /\d+/
put "way/:id" => "way#update", :id => /\d+/
delete "way/:id" => "way#delete", :id => /\d+/
get "ways" => "way#ways"
put "relation/create" => "relation#create"
get "relation/:id/relations" => "relation#relations_for_relation", :id => /\d+/
get "relation/:id/history" => "old_relation#history", :id => /\d+/
get "relation/:id/full" => "relation#full", :id => /\d+/
post "relation/:id/:version/redact" => "old_relation#redact", :version => /\d+/, :id => /\d+/
get "relation/:id/:version" => "old_relation#version", :id => /\d+/, :version => /\d+/
get "relation/:id" => "relation#read", :id => /\d+/
put "relation/:id" => "relation#update", :id => /\d+/
delete "relation/:id" => "relation#delete", :id => /\d+/
get "relations" => "relation#relations"
get "map" => "api#map"
get "trackpoints" => "api#trackpoints"
get "changes" => "api#changes"
get "search" => "search#search_all", :as => "api_search"
get "ways/search" => "search#search_ways"
get "relations/search" => "search#search_relations"
get "nodes/search" => "search#search_nodes"
get "user/:id" => "user#api_read", :id => /\d+/
get "user/details" => "user#api_details"
get "user/gpx_files" => "user#api_gpx_files"
get "user/preferences" => "user_preferences#read"
get "user/preferences/:preference_key" => "user_preferences#read_one"
put "user/preferences" => "user_preferences#update"
put "user/preferences/:preference_key" => "user_preferences#update_one"
delete "user/preferences/:preference_key" => "user_preferences#delete_one"
post "gpx/create" => "trace#api_create"
get "gpx/:id" => "trace#api_read", :id => /\d+/
put "gpx/:id" => "trace#api_update", :id => /\d+/
delete "gpx/:id" => "trace#api_delete", :id => /\d+/
get "gpx/:id/details" => "trace#api_read", :id => /\d+/
get "gpx/:id/data" => "trace#api_data"
# AMF (ActionScript) API
post "amf/read" => "amf#amf_read"
post "amf/write" => "amf#amf_write"
get "swf/trackpoints" => "swf#trackpoints"
# Map notes API
resources :notes, :except => [:new, :edit, :update], :constraints => { :id => /\d+/ }, :defaults => { :format => "xml" } do
collection do
get "search"
get "feed", :defaults => { :format => "rss" }
end
member do
post "comment"
post "close"
post "reopen"
end
end
post "notes/addPOIexec" => "notes#create"
post "notes/closePOIexec" => "notes#close"
post "notes/editPOIexec" => "notes#comment"
get "notes/getGPX" => "notes#index", :format => "gpx"
get "notes/getRSSfeed" => "notes#feed", :format => "rss"
end
# Data browsing
get "/way/:id" => "browse#way", :id => /\d+/, :as => :way
get "/way/:id/history" => "browse#way_history", :id => /\d+/
get "/node/:id" => "browse#node", :id => /\d+/, :as => :node
get "/node/:id/history" => "browse#node_history", :id => /\d+/
get "/relation/:id" => "browse#relation", :id => /\d+/, :as => :relation
get "/relation/:id/history" => "browse#relation_history", :id => /\d+/
get "/changeset/:id" => "browse#changeset", :as => :changeset, :id => /\d+/
get "/changeset/:id/comments/feed" => "changeset#comments_feed", :as => :changeset_comments_feed, :id => /\d*/, :defaults => { :format => "rss" }
get "/note/:id" => "browse#note", :id => /\d+/, :as => "browse_note"
get "/note/new" => "browse#new_note"
get "/user/:display_name/history" => "changeset#list"
get "/user/:display_name/history/feed" => "changeset#feed", :defaults => { :format => :atom }
get "/user/:display_name/notes" => "notes#mine"
get "/history/friends" => "changeset#list", :friends => true, :as => "friend_changesets", :defaults => { :format => :html }
get "/history/nearby" => "changeset#list", :nearby => true, :as => "nearby_changesets", :defaults => { :format => :html }
get "/browse/way/:id", :to => redirect(:path => "/way/%{id}")
get "/browse/way/:id/history", :to => redirect(:path => "/way/%{id}/history")
get "/browse/node/:id", :to => redirect(:path => "/node/%{id}")
get "/browse/node/:id/history", :to => redirect(:path => "/node/%{id}/history")
get "/browse/relation/:id", :to => redirect(:path => "/relation/%{id}")
get "/browse/relation/:id/history", :to => redirect(:path => "/relation/%{id}/history")
get "/browse/changeset/:id", :to => redirect(:path => "/changeset/%{id}")
get "/browse/note/:id", :to => redirect(:path => "/note/%{id}")
get "/user/:display_name/edits", :to => redirect(:path => "/user/%{display_name}/history")
get "/user/:display_name/edits/feed", :to => redirect(:path => "/user/%{display_name}/history/feed")
get "/browse/friends", :to => redirect(:path => "/history/friends")
get "/browse/nearby", :to => redirect(:path => "/history/nearby")
get "/browse/changesets/feed", :to => redirect(:path => "/history/feed")
get "/browse/changesets", :to => redirect(:path => "/history")
get "/browse", :to => redirect(:path => "/history")
# web site
root :to => "site#index", :via => [:get, :post]
get "/edit" => "site#edit"
get "/copyright/:copyright_locale" => "site#copyright"
get "/copyright" => "site#copyright"
get "/welcome" => "site#welcome"
get "/fixthemap" => "site#fixthemap"
get "/help" => "site#help"
get "/about" => "site#about"
get "/history" => "changeset#list"
get "/history/feed" => "changeset#feed", :defaults => { :format => :atom }
get "/history/comments/feed" => "changeset#comments_feed", :as => :changesets_comments_feed, :defaults => { :format => "rss" }
get "/export" => "site#export"
match "/login" => "user#login", :via => [:get, :post]
match "/logout" => "user#logout", :via => [:get, :post]
get "/offline" => "site#offline"
get "/key" => "site#key"
get "/id" => "site#id"
get "/query" => "browse#query"
get "/user/new" => "user#new"
post "/user/new" => "user#create"
get "/user/terms" => "user#terms"
post "/user/save" => "user#save"
get "/user/:display_name/confirm/resend" => "user#confirm_resend"
match "/user/:display_name/confirm" => "user#confirm", :via => [:get, :post]
match "/user/confirm" => "user#confirm", :via => [:get, :post]
match "/user/confirm-email" => "user#confirm_email", :via => [:get, :post]
post "/user/go_public" => "user#go_public"
match "/user/reset-password" => "user#reset_password", :via => [:get, :post]
match "/user/forgot-password" => "user#lost_password", :via => [:get, :post]
get "/user/suspended" => "user#suspended"
get "/index.html", :to => redirect(:path => "/")
get "/create-account.html", :to => redirect(:path => "/user/new")
get "/forgot-password.html", :to => redirect(:path => "/user/forgot-password")
# omniauth
get "/auth/failure" => "user#auth_failure"
match "/auth/:provider/callback" => "user#auth_success", :via => [:get, :post], :as => :auth_success
match "/auth/:provider" => "user#auth", :via => [:get, :post], :as => :auth
# permalink
get "/go/:code" => "site#permalink", :code => /[a-zA-Z0-9_@~]+[=-]*/
# rich text preview
post "/preview/:type" => "site#preview", :as => :preview
# traces
get "/user/:display_name/traces/tag/:tag/page/:page" => "trace#list", :page => /[1-9][0-9]*/
get "/user/:display_name/traces/tag/:tag" => "trace#list"
get "/user/:display_name/traces/page/:page" => "trace#list", :page => /[1-9][0-9]*/
get "/user/:display_name/traces" => "trace#list"
get "/user/:display_name/traces/tag/:tag/rss" => "trace#georss", :defaults => { :format => :rss }
get "/user/:display_name/traces/rss" => "trace#georss", :defaults => { :format => :rss }
get "/user/:display_name/traces/:id" => "trace#view"
get "/user/:display_name/traces/:id/picture" => "trace#picture"
get "/user/:display_name/traces/:id/icon" => "trace#icon"
get "/traces/tag/:tag/page/:page" => "trace#list", :page => /[1-9][0-9]*/
get "/traces/tag/:tag" => "trace#list"
get "/traces/page/:page" => "trace#list", :page => /[1-9][0-9]*/
get "/traces" => "trace#list"
get "/traces/tag/:tag/rss" => "trace#georss", :defaults => { :format => :rss }
get "/traces/rss" => "trace#georss", :defaults => { :format => :rss }
get "/traces/mine/tag/:tag/page/:page" => "trace#mine", :page => /[1-9][0-9]*/
get "/traces/mine/tag/:tag" => "trace#mine"
get "/traces/mine/page/:page" => "trace#mine"
get "/traces/mine" => "trace#mine"
match "/trace/create" => "trace#create", :via => [:get, :post]
get "/trace/:id/data" => "trace#data", :id => /\d+/, :as => "trace_data"
match "/trace/:id/edit" => "trace#edit", :via => [:get, :post], :id => /\d+/, :as => "trace_edit"
post "/trace/:id/delete" => "trace#delete", :id => /\d+/
# diary pages
match "/diary/new" => "diary_entry#new", :via => [:get, :post]
get "/diary/friends" => "diary_entry#list", :friends => true, :as => "friend_diaries"
get "/diary/nearby" => "diary_entry#list", :nearby => true, :as => "nearby_diaries"
get "/user/:display_name/diary/rss" => "diary_entry#rss", :defaults => { :format => :rss }
get "/diary/:language/rss" => "diary_entry#rss", :defaults => { :format => :rss }
get "/diary/rss" => "diary_entry#rss", :defaults => { :format => :rss }
get "/user/:display_name/diary/comments/:page" => "diary_entry#comments", :page => /[1-9][0-9]*/
get "/user/:display_name/diary/comments/" => "diary_entry#comments"
get "/user/:display_name/diary" => "diary_entry#list"
get "/diary/:language" => "diary_entry#list"
get "/diary" => "diary_entry#list"
get "/user/:display_name/diary/:id" => "diary_entry#view", :id => /\d+/, :as => :diary_entry
post "/user/:display_name/diary/:id/newcomment" => "diary_entry#comment", :id => /\d+/
match "/user/:display_name/diary/:id/edit" => "diary_entry#edit", :via => [:get, :post], :id => /\d+/
post "/user/:display_name/diary/:id/hide" => "diary_entry#hide", :id => /\d+/, :as => :hide_diary_entry
post "/user/:display_name/diary/:id/hidecomment/:comment" => "diary_entry#hidecomment", :id => /\d+/, :comment => /\d+/, :as => :hide_diary_comment
post "/user/:display_name/diary/:id/subscribe" => "diary_entry#subscribe", :as => :diary_entry_subscribe, :id => /\d+/
post "/user/:display_name/diary/:id/unsubscribe" => "diary_entry#unsubscribe", :as => :diary_entry_unsubscribe, :id => /\d+/
# user pages
get "/user/:display_name" => "user#view", :as => "user"
match "/user/:display_name/make_friend" => "user#make_friend", :via => [:get, :post], :as => "make_friend"
match "/user/:display_name/remove_friend" => "user#remove_friend", :via => [:get, :post], :as => "remove_friend"
match "/user/:display_name/account" => "user#account", :via => [:get, :post]
get "/user/:display_name/set_status" => "user#set_status", :as => :set_status_user
get "/user/:display_name/delete" => "user#delete", :as => :delete_user
# user lists
match "/users" => "user#list", :via => [:get, :post]
match "/users/:status" => "user#list", :via => [:get, :post]
# geocoder
get "/search" => "geocoder#search"
get "/geocoder/search_latlon" => "geocoder#search_latlon"
get "/geocoder/search_ca_postcode" => "geocoder#search_ca_postcode"
get "/geocoder/search_osm_nominatim" => "geocoder#search_osm_nominatim"
get "/geocoder/search_geonames" => "geocoder#search_geonames"
get "/geocoder/search_osm_nominatim_reverse" => "geocoder#search_osm_nominatim_reverse"
get "/geocoder/search_geonames_reverse" => "geocoder#search_geonames_reverse"
# directions
get "/directions" => "directions#search"
# export
post "/export/finish" => "export#finish"
get "/export/embed" => "export#embed"
# messages
get "/user/:display_name/inbox" => "message#inbox", :as => "inbox"
get "/user/:display_name/outbox" => "message#outbox", :as => "outbox"
match "/message/new/:display_name" => "message#new", :via => [:get, :post], :as => "new_message"
get "/message/read/:message_id" => "message#read", :as => "read_message"
post "/message/mark/:message_id" => "message#mark", :as => "mark_message"
match "/message/reply/:message_id" => "message#reply", :via => [:get, :post], :as => "reply_message"
post "/message/delete/:message_id" => "message#delete", :as => "delete_message"
# oauth admin pages (i.e: for setting up new clients, etc...)
scope "/user/:display_name" do
resources :oauth_clients
end
match "/oauth/revoke" => "oauth#revoke", :via => [:get, :post]
match "/oauth/authorize" => "oauth#authorize", :via => [:get, :post], :as => :authorize
get "/oauth/token" => "oauth#token", :as => :token
match "/oauth/request_token" => "oauth#request_token", :via => [:get, :post], :as => :request_token
match "/oauth/access_token" => "oauth#access_token", :via => [:get, :post], :as => :access_token
get "/oauth/test_request" => "oauth#test_request", :as => :test_request
# roles and banning pages
post "/user/:display_name/role/:role/grant" => "user_roles#grant", :as => "grant_role"
post "/user/:display_name/role/:role/revoke" => "user_roles#revoke", :as => "revoke_role"
get "/user/:display_name/blocks" => "user_blocks#blocks_on"
get "/user/:display_name/blocks_by" => "user_blocks#blocks_by"
get "/blocks/new/:display_name" => "user_blocks#new", :as => "new_user_block"
resources :user_blocks
match "/blocks/:id/revoke" => "user_blocks#revoke", :via => [:get, :post], :as => "revoke_user_block"
# redactions
resources :redactions
end
| 1 | 11,280 | Whatever the decision is about the request type to use, this should only match what is needed and not the other one. | openstreetmap-openstreetmap-website | rb |
@@ -1025,7 +1025,9 @@ Blockly.BlockSvg.prototype.handleDragFree_ = function(oldXY, newXY, e) {
}
var updatePreviews = true;
- if (Blockly.localConnection_ && Blockly.highlightedConnection_) {
+ if (localConnection && localConnection.type == Blockly.OUTPUT_VALUE) {
+ updatePreviews = true; // Always update previews for output connections.
+ } else if (Blockly.localConnection_ && Blockly.highlightedConnection_) {
var xDiff = Blockly.localConnection_.x_ + dxy.x -
Blockly.highlightedConnection_.x_;
var yDiff = Blockly.localConnection_.y_ + dxy.y - | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Methods for graphically rendering a block as SVG.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.BlockSvg');
goog.require('Blockly.Block');
goog.require('Blockly.ContextMenu');
goog.require('Blockly.RenderedConnection');
goog.require('goog.Timer');
goog.require('goog.asserts');
goog.require('goog.dom');
goog.require('goog.math.Coordinate');
goog.require('goog.userAgent');
/**
* Class for a block's SVG representation.
* Not normally called directly, workspace.newBlock() is preferred.
* @param {!Blockly.Workspace} workspace The block's workspace.
* @param {?string} prototypeName Name of the language object containing
* type-specific functions for this block.
 * @param {string=} opt_id Optional ID. Use this ID if provided, otherwise
* create a new id.
* @extends {Blockly.Block}
* @constructor
*/
Blockly.BlockSvg = function(workspace, prototypeName, opt_id) {
// Create core elements for the block.
/**
* @type {SVGElement}
* @private
*/
this.svgGroup_ = Blockly.createSvgElement('g', {}, null);
/** @type {SVGElement} */
this.svgPath_ = Blockly.createSvgElement('path', {'class': 'blocklyPath'},
this.svgGroup_);
this.svgPath_.tooltip = this;
/** @type {boolean} */
this.rendered = false;
Blockly.Tooltip.bindMouseEvents(this.svgPath_);
Blockly.BlockSvg.superClass_.constructor.call(this,
workspace, prototypeName, opt_id);
};
goog.inherits(Blockly.BlockSvg, Blockly.Block);
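/**
 * Editorial sketch, not part of the original source: illustrates the
 * preferred construction path mentioned in the constructor comment above,
 * i.e. going through workspace.newBlock() rather than calling the
 * constructor directly. The block type name is an assumption for the
 * example; this helper is never invoked by Blockly itself.
 * @param {!Blockly.Workspace} workspace A rendered workspace.
 * @return {!Blockly.BlockSvg} The newly created, rendered block.
 */
Blockly.BlockSvg.exampleCreate_ = function(workspace) {
  var block = workspace.newBlock('example_block_type');
  block.initSvg();
  block.render();
  return block;
};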
/**
* Height of this block, not including any statement blocks above or below.
* @type {number}
*/
Blockly.BlockSvg.prototype.height = 0;
/**
* Width of this block, including any connected value blocks.
* @type {number}
*/
Blockly.BlockSvg.prototype.width = 0;
/**
* Opacity of this block between 0 and 1.
* @type {number}
* @private
*/
Blockly.BlockSvg.prototype.opacity_ = 1;
/**
* Original location of block being dragged.
* @type {goog.math.Coordinate}
* @private
*/
Blockly.BlockSvg.prototype.dragStartXY_ = null;
/**
* Whether the block glows as if running.
* @type {boolean}
* @private
*/
Blockly.BlockSvg.prototype.isGlowingBlock_ = false;
/**
* Whether the block's whole stack glows as if running.
* @type {boolean}
* @private
*/
Blockly.BlockSvg.prototype.isGlowingStack_ = false;
/**
* Constant for identifying rows that are to be rendered inline.
* Don't collide with Blockly.INPUT_VALUE and friends.
* @const
*/
Blockly.BlockSvg.INLINE = -1;
/**
* Create and initialize the SVG representation of the block.
* May be called more than once.
*/
Blockly.BlockSvg.prototype.initSvg = function() {
goog.asserts.assert(this.workspace.rendered, 'Workspace is headless.');
// Input shapes are empty holes drawn when a value input is not connected.
this.inputShapes_ = {};
for (var i = 0, input; input = this.inputList[i]; i++) {
input.init();
if (input.type === Blockly.INPUT_VALUE) {
this.initInputShape(input);
}
}
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].createIcon();
}
this.updateColour();
this.updateMovable();
if (!this.workspace.options.readOnly && !this.eventsInit_) {
Blockly.bindEvent_(this.getSvgRoot(), 'mousedown', this,
this.onMouseDown_);
var thisBlock = this;
Blockly.bindEvent_(this.getSvgRoot(), 'touchstart', null,
function(e) {Blockly.longStart_(e, thisBlock);});
}
this.eventsInit_ = true;
if (!this.getSvgRoot().parentNode) {
this.workspace.getCanvas().appendChild(this.getSvgRoot());
}
};
/**
* Create and initialize the SVG element for an input shape.
* @param {!Blockly.Input} input Value input to add a shape SVG element for.
*/
Blockly.BlockSvg.prototype.initInputShape = function(input) {
this.inputShapes_[input.name] = Blockly.createSvgElement(
'path',
{
'class': 'blocklyPath',
'style': 'visibility: hidden' // Hide by default - shown when not connected.
},
this.svgGroup_
);
};
/**
* Select this block. Highlight it visually.
*/
Blockly.BlockSvg.prototype.select = function() {
if (this.isShadow() && this.getParent()) {
// Shadow blocks should not be selected.
this.getParent().select();
return;
}
if (Blockly.selected == this) {
return;
}
var oldId = null;
if (Blockly.selected) {
oldId = Blockly.selected.id;
// Unselect any previously selected block.
Blockly.Events.disable();
Blockly.selected.unselect();
Blockly.Events.enable();
}
var event = new Blockly.Events.Ui(null, 'selected', oldId, this.id);
event.workspaceId = this.workspace.id;
Blockly.Events.fire(event);
Blockly.selected = this;
this.addSelect();
};
/**
* Unselect this block. Remove its highlighting.
*/
Blockly.BlockSvg.prototype.unselect = function() {
if (Blockly.selected != this) {
return;
}
var event = new Blockly.Events.Ui(null, 'selected', this.id, null);
event.workspaceId = this.workspace.id;
Blockly.Events.fire(event);
Blockly.selected = null;
this.removeSelect();
};
/**
* Glow only this particular block, to highlight it visually as if it's running.
* @param {boolean} isGlowingBlock Whether the block should glow.
*/
Blockly.BlockSvg.prototype.setGlowBlock = function(isGlowingBlock) {
this.isGlowingBlock_ = isGlowingBlock;
this.updateColour();
};
/**
* Glow the stack starting with this block, to highlight it visually as if it's running.
* @param {boolean} isGlowingStack Whether the stack starting with this block should glow.
*/
Blockly.BlockSvg.prototype.setGlowStack = function(isGlowingStack) {
this.isGlowingStack_ = isGlowingStack;
// Update the applied SVG filter if the property has changed
var svg = this.getSvgRoot();
if (this.isGlowingStack_ && !svg.hasAttribute('filter')) {
svg.setAttribute('filter', 'url(#blocklyStackGlowFilter)');
} else if (!this.isGlowingStack_ && svg.hasAttribute('filter')) {
svg.removeAttribute('filter');
}
};
/**
* Block's mutator icon (if any).
* @type {Blockly.Mutator}
*/
Blockly.BlockSvg.prototype.mutator = null;
/**
* Block's comment icon (if any).
* @type {Blockly.Comment}
*/
Blockly.BlockSvg.prototype.comment = null;
/**
* Block's warning icon (if any).
* @type {Blockly.Warning}
*/
Blockly.BlockSvg.prototype.warning = null;
/**
* Returns a list of mutator, comment, and warning icons.
* @return {!Array} List of icons.
*/
Blockly.BlockSvg.prototype.getIcons = function() {
var icons = [];
if (this.mutator) {
icons.push(this.mutator);
}
if (this.comment) {
icons.push(this.comment);
}
if (this.warning) {
icons.push(this.warning);
}
return icons;
};
/**
* Wrapper function called when a mouseUp occurs during a drag operation.
* @type {Array.<!Array>}
* @private
*/
Blockly.BlockSvg.onMouseUpWrapper_ = null;
/**
* Wrapper function called when a mouseMove occurs during a drag operation.
* @type {Array.<!Array>}
* @private
*/
Blockly.BlockSvg.onMouseMoveWrapper_ = null;
/**
* Stop binding to the global mouseup and mousemove events.
* @private
*/
Blockly.BlockSvg.terminateDrag_ = function() {
if (Blockly.BlockSvg.onMouseUpWrapper_) {
Blockly.unbindEvent_(Blockly.BlockSvg.onMouseUpWrapper_);
Blockly.BlockSvg.onMouseUpWrapper_ = null;
}
if (Blockly.BlockSvg.onMouseMoveWrapper_) {
Blockly.unbindEvent_(Blockly.BlockSvg.onMouseMoveWrapper_);
Blockly.BlockSvg.onMouseMoveWrapper_ = null;
}
var selected = Blockly.selected;
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
// Terminate a drag operation.
if (selected) {
if (Blockly.replacementMarker_) {
Blockly.BlockSvg.removeReplacementMarker();
} else if (Blockly.insertionMarker_) {
Blockly.Events.disable();
if (Blockly.insertionMarkerConnection_) {
Blockly.BlockSvg.disconnectInsertionMarker();
}
Blockly.insertionMarker_.dispose();
Blockly.insertionMarker_ = null;
Blockly.Events.enable();
}
// Update the connection locations.
var xy = selected.getRelativeToSurfaceXY();
var dxy = goog.math.Coordinate.difference(xy, selected.dragStartXY_);
var event = new Blockly.Events.Move(selected);
event.oldCoordinate = selected.dragStartXY_;
event.recordNew();
Blockly.Events.fire(event);
selected.moveConnections_(dxy.x, dxy.y);
delete selected.draggedBubbles_;
selected.setDragging_(false);
selected.moveOffDragSurface_();
selected.render();
      // Ensure that any snap and bump are part of this move's event group.
var group = Blockly.Events.getGroup();
setTimeout(function() {
Blockly.Events.setGroup(group);
selected.snapToGrid();
Blockly.Events.setGroup(false);
}, Blockly.BUMP_DELAY / 2);
setTimeout(function() {
Blockly.Events.setGroup(group);
selected.bumpNeighbours_();
Blockly.Events.setGroup(false);
}, Blockly.BUMP_DELAY);
// Fire an event to allow scrollbars to resize.
Blockly.asyncSvgResize(this.workspace);
}
}
Blockly.dragMode_ = Blockly.DRAG_NONE;
Blockly.Css.setCursor(Blockly.Css.Cursor.OPEN);
};
/**
* Set parent of this block to be a new block or null.
* @param {Blockly.BlockSvg} newParent New parent block.
*/
Blockly.BlockSvg.prototype.setParent = function(newParent) {
if (newParent == this.parentBlock_) {
return;
}
var svgRoot = this.getSvgRoot();
if (this.parentBlock_ && svgRoot) {
// Move this block up the DOM. Keep track of x/y translations.
var xy = this.getRelativeToSurfaceXY();
// Avoid moving a block up the DOM if it's currently selected/dragging,
// so as to avoid taking things off the drag surface.
if (Blockly.selected != this) {
this.workspace.getCanvas().appendChild(svgRoot);
this.translate(xy.x, xy.y);
}
}
Blockly.Field.startCache();
Blockly.BlockSvg.superClass_.setParent.call(this, newParent);
Blockly.Field.stopCache();
if (newParent) {
var oldXY = this.getRelativeToSurfaceXY();
newParent.getSvgRoot().appendChild(svgRoot);
var newXY = this.getRelativeToSurfaceXY();
// Move the connections to match the child's new position.
this.moveConnections_(newXY.x - oldXY.x, newXY.y - oldXY.y);
// If we are a shadow block, inherit tertiary colour.
if (this.isShadow()) {
this.setColour(this.getColour(), this.getColourSecondary(),
newParent.getColourTertiary());
}
}
};
/**
* Return the coordinates of the top-left corner of this block relative to the
* drawing surface's origin (0,0).
* @return {!goog.math.Coordinate} Object with .x and .y properties.
*/
Blockly.BlockSvg.prototype.getRelativeToSurfaceXY = function() {
// The drawing surface is relative to either the workspace canvas
// or to the drag surface group.
var x = 0;
var y = 0;
var dragSurfaceGroup = (this.workspace.dragSurface) ?
this.workspace.dragSurface.getGroup() : null;
var element = this.getSvgRoot();
if (element) {
do {
// Loop through this block and every parent.
var xy = Blockly.getRelativeXY_(element);
x += xy.x;
y += xy.y;
// If this element is the current element on the drag surface, include
// the translation of the drag surface itself.
if (this.workspace.dragSurface &&
this.workspace.dragSurface.getCurrentBlock() == element) {
var surfaceTranslation = this.workspace.dragSurface.getSurfaceTranslation();
x += surfaceTranslation.x;
y += surfaceTranslation.y;
}
element = element.parentNode;
} while (element && element != this.workspace.getCanvas() &&
element != dragSurfaceGroup);
}
return new goog.math.Coordinate(x, y);
};
/**
* Move a block by a relative offset.
* @param {number} dx Horizontal offset.
* @param {number} dy Vertical offset.
*/
Blockly.BlockSvg.prototype.moveBy = function(dx, dy) {
goog.asserts.assert(!this.parentBlock_, 'Block has parent.');
var eventsEnabled = Blockly.Events.isEnabled();
if (eventsEnabled) {
var event = new Blockly.Events.Move(this);
}
var xy = this.getRelativeToSurfaceXY();
this.translate(xy.x + dx, xy.y + dy);
this.moveConnections_(dx, dy);
if (eventsEnabled) {
event.recordNew();
Blockly.Events.fire(event);
}
};
/**
* Set this block to an absolute translation.
* @param {number} x Horizontal translation.
* @param {number} y Vertical translation.
* @param {boolean=} opt_use3d If set, use 3d translation.
*/
Blockly.BlockSvg.prototype.translate = function(x, y, opt_use3d) {
if (opt_use3d) {
this.getSvgRoot().setAttribute('style', 'transform: translate3d(' + x + 'px,' + y + 'px, 0px)');
} else {
this.getSvgRoot().setAttribute('transform', 'translate(' + x + ',' + y + ')');
}
};
/**
* Snap this block to the nearest grid point.
*/
Blockly.BlockSvg.prototype.snapToGrid = function() {
if (!this.workspace) {
return; // Deleted block.
}
if (Blockly.dragMode_ != Blockly.DRAG_NONE) {
return; // Don't bump blocks during a drag.
}
if (this.getParent()) {
return; // Only snap top-level blocks.
}
if (this.isInFlyout) {
return; // Don't move blocks around in a flyout.
}
if (!this.workspace.options.gridOptions ||
!this.workspace.options.gridOptions['snap']) {
return; // Config says no snapping.
}
var spacing = this.workspace.options.gridOptions['spacing'];
var half = spacing / 2;
var xy = this.getRelativeToSurfaceXY();
var dx = Math.round((xy.x - half) / spacing) * spacing + half - xy.x;
var dy = Math.round((xy.y - half) / spacing) * spacing + half - xy.y;
dx = Math.round(dx);
dy = Math.round(dy);
if (dx != 0 || dy != 0) {
this.moveBy(dx, dy);
}
};
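// Editorial note, not part of the original source: a worked example of the
// snapping arithmetic above. With gridOptions spacing = 40 (half = 20) and a
// block at x = 73: Math.round((73 - 20) / 40) = 1, so the target is
// 1 * 40 + 20 = 60 and dx = 60 - 73 = -13, i.e. the block snaps to x = 60.
// The numbers are illustrative only.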
/**
* Returns the coordinates of a bounding box describing the dimensions of this
* block and any blocks stacked below it.
* @return {!{topLeft: goog.math.Coordinate, bottomRight: goog.math.Coordinate}}
* Object with top left and bottom right coordinates of the bounding box.
*/
Blockly.BlockSvg.prototype.getBoundingRectangle = function() {
var blockXY = this.getRelativeToSurfaceXY(this);
var blockBounds = this.getHeightWidth();
var topLeft;
var bottomRight;
if (this.RTL) {
topLeft = new goog.math.Coordinate(blockXY.x - blockBounds.width,
blockXY.y);
bottomRight = new goog.math.Coordinate(blockXY.x,
blockXY.y + blockBounds.height);
} else {
topLeft = new goog.math.Coordinate(blockXY.x, blockXY.y);
bottomRight = new goog.math.Coordinate(blockXY.x + blockBounds.width,
blockXY.y + blockBounds.height);
}
return {topLeft: topLeft, bottomRight: bottomRight};
};
/**
* Set block opacity for SVG rendering.
 * @param {number} opacity Intended opacity, between 0 and 1
*/
Blockly.BlockSvg.prototype.setOpacity = function(opacity) {
this.opacity_ = opacity;
if (this.rendered) {
this.updateColour();
}
};
/**
* Get block opacity for SVG rendering.
 * @return {number} Intended opacity, between 0 and 1
*/
Blockly.BlockSvg.prototype.getOpacity = function() {
return this.opacity_;
};
/**
* Set whether the block is collapsed or not.
* @param {boolean} collapsed True if collapsed.
*/
Blockly.BlockSvg.prototype.setCollapsed = function(collapsed) {
if (this.collapsed_ == collapsed) {
return;
}
var renderList = [];
// Show/hide the inputs.
for (var i = 0, input; input = this.inputList[i]; i++) {
renderList.push.apply(renderList, input.setVisible(!collapsed));
}
var COLLAPSED_INPUT_NAME = '_TEMP_COLLAPSED_INPUT';
if (collapsed) {
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].setVisible(false);
}
var text = this.toString(Blockly.COLLAPSE_CHARS);
this.appendDummyInput(COLLAPSED_INPUT_NAME).appendField(text).init();
} else {
this.removeInput(COLLAPSED_INPUT_NAME);
// Clear any warnings inherited from enclosed blocks.
this.setWarningText(null);
}
Blockly.BlockSvg.superClass_.setCollapsed.call(this, collapsed);
if (!renderList.length) {
// No child blocks, just render this block.
renderList[0] = this;
}
if (this.rendered) {
for (var i = 0, block; block = renderList[i]; i++) {
block.render();
}
// Don't bump neighbours.
// Although bumping neighbours would make sense, users often collapse
// all their functions and store them next to each other. Expanding and
// bumping causes all their definitions to go out of alignment.
}
};
/**
* Open the next (or previous) FieldTextInput.
* @param {Blockly.Field|Blockly.Block} start Current location.
* @param {boolean} forward If true go forward, otherwise backward.
*/
Blockly.BlockSvg.prototype.tab = function(start, forward) {
// This function need not be efficient since it runs once on a keypress.
// Create an ordered list of all text fields and connected inputs.
var list = [];
for (var i = 0, input; input = this.inputList[i]; i++) {
for (var j = 0, field; field = input.fieldRow[j]; j++) {
if (field instanceof Blockly.FieldTextInput) {
// TODO: Also support dropdown fields.
list.push(field);
}
}
if (input.connection) {
var block = input.connection.targetBlock();
if (block) {
list.push(block);
}
}
}
i = list.indexOf(start);
if (i == -1) {
// No start location, start at the beginning or end.
i = forward ? -1 : list.length;
}
var target = list[forward ? i + 1 : i - 1];
if (!target) {
// Ran off of list.
var parent = this.getParent();
if (parent) {
parent.tab(this, forward);
}
} else if (target instanceof Blockly.Field) {
target.showEditor_();
} else {
target.tab(null, forward);
}
};
/**
* Handle a mouse-down on an SVG block.
* @param {!Event} e Mouse down event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseDown_ = function(e) {
if (this.workspace.options.readOnly) {
return;
}
if (this.isInFlyout) {
e.stopPropagation();
return;
}
this.workspace.markFocused();
// Update Blockly's knowledge of its own location.
Blockly.svgResize(this.workspace);
Blockly.terminateDrag_();
this.select();
Blockly.hideChaff();
this.workspace.recordDeleteAreas();
if (Blockly.isRightButton(e)) {
// Right-click.
this.showContextMenu_(e);
} else if (!this.isMovable()) {
// Allow immovable blocks to be selected and context menued, but not
// dragged. Let this event bubble up to document, so the workspace may be
// dragged instead.
return;
} else {
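    // Group all events generated by this gesture so that undo treats the
    // whole drag as a single action.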
if (!Blockly.Events.getGroup()) {
Blockly.Events.setGroup(true);
}
// Left-click (or middle click)
Blockly.Css.setCursor(Blockly.Css.Cursor.CLOSED);
this.dragStartXY_ = this.getRelativeToSurfaceXY();
this.workspace.startDrag(e, this.dragStartXY_);
Blockly.dragMode_ = Blockly.DRAG_STICKY;
Blockly.BlockSvg.onMouseUpWrapper_ = Blockly.bindEvent_(document,
'mouseup', this, this.onMouseUp_);
Blockly.BlockSvg.onMouseMoveWrapper_ = Blockly.bindEvent_(document,
'mousemove', this, this.onMouseMove_);
// Build a list of bubbles that need to be moved and where they started.
this.draggedBubbles_ = [];
var descendants = this.getDescendants();
for (var i = 0, descendant; descendant = descendants[i]; i++) {
var icons = descendant.getIcons();
for (var j = 0; j < icons.length; j++) {
var data = icons[j].getIconLocation();
data.bubble = icons[j];
this.draggedBubbles_.push(data);
}
}
}
// This event has been handled. No need to bubble up to the document.
e.stopPropagation();
e.preventDefault();
};
/**
* Handle a mouse-up anywhere in the SVG pane. Is only registered when a
* block is clicked. We can't use mouseUp on the block since a fast-moving
* cursor can briefly escape the block before it catches up.
* @param {!Event} e Mouse up event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseUp_ = function(e) {
var isNotShadowBlock = this.ioClickHackIsNotShadow_(e);
  if (Blockly.dragMode_ != Blockly.DRAG_FREE &&
      !Blockly.WidgetDiv.isVisible() && isNotShadowBlock) {
Blockly.Events.fire(
new Blockly.Events.Ui(this, 'click', undefined, undefined));
// Scratch-specific: also fire a "stack click" event for this stack.
// This is used to toggle the stack when any block in the stack is clicked.
var rootBlock = this.workspace.getBlockById(this.id).getRootBlock();
Blockly.Events.fire(
new Blockly.Events.Ui(rootBlock, 'stackclick', undefined, undefined));
}
Blockly.terminateDrag_();
if (Blockly.selected && Blockly.highlightedConnection_) {
this.positionNewBlock(Blockly.selected,
Blockly.localConnection_, Blockly.highlightedConnection_);
// Connect two blocks together.
Blockly.localConnection_.connect(Blockly.highlightedConnection_);
if (this.rendered) {
// Trigger a connection animation.
// Determine which connection is inferior (lower in the source stack).
var inferiorConnection = Blockly.localConnection_.isSuperior() ?
Blockly.highlightedConnection_ : Blockly.localConnection_;
inferiorConnection.getSourceBlock().connectionUiEffect();
}
if (this.workspace.trashcan) {
// Don't throw an object in the trash can if it just got connected.
this.workspace.trashcan.close();
}
} else if (!this.getParent() && Blockly.selected.isDeletable() &&
this.workspace.isDeleteArea(e)) {
var trashcan = this.workspace.trashcan;
if (trashcan) {
goog.Timer.callOnce(trashcan.close, 100, trashcan);
}
Blockly.selected.dispose(false, true);
// Dropping a block on the trash can will usually cause the workspace to
// resize to contain the newly positioned block. Force a second resize
// now that the block has been deleted.
Blockly.asyncSvgResize(this.workspace);
}
if (Blockly.highlightedConnection_) {
Blockly.highlightedConnection_ = null;
}
Blockly.Css.setCursor(Blockly.Css.Cursor.OPEN);
if (!Blockly.WidgetDiv.isVisible()) {
Blockly.Events.setGroup(false);
}
};
/**
* XXX: Hack to fix drop-down clicking issue for Google I/O.
* We cannot just check isShadow, since `this` is the parent block.
* See: https://github.com/google/blockly/issues/336
* @param {!Event} e Mouse up event.
* @return {boolean} True if the block is not the drop-down shadow.
*/
Blockly.BlockSvg.prototype.ioClickHackIsNotShadow_ = function(e) {
// True if click target is a non-shadow block path.
if (e.target === this.svgPath_ &&
e.target.parentNode === this.getSvgRoot()) {
return true;
}
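  // A click on one of this block's image fields also counts as a click on
  // the block itself rather than on a drop-down shadow.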
for (var i = 0, input; input = this.inputList[i]; i++) {
for (var j = 0, field; field = input.fieldRow[j]; j++) {
if (field.imageElement_ && field.imageElement_ === e.target) {
return true;
}
}
}
return false;
};
/**
* Load the block's help page in a new window.
* @private
*/
Blockly.BlockSvg.prototype.showHelp_ = function() {
var url = goog.isFunction(this.helpUrl) ? this.helpUrl() : this.helpUrl;
if (url) {
// @todo rewrite
alert(url);
}
};
/**
* Show the context menu for this block.
* @param {!Event} e Mouse event.
* @private
*/
Blockly.BlockSvg.prototype.showContextMenu_ = function(e) {
if (this.workspace.options.readOnly || !this.contextMenu) {
return;
}
// Save the current block in a variable for use in closures.
var block = this;
var menuOptions = [];
if (this.isDeletable() && this.isMovable() && !block.isInFlyout) {
// Option to duplicate this block.
var duplicateOption = {
text: Blockly.Msg.DUPLICATE_BLOCK,
enabled: true,
callback: function() {
Blockly.duplicate_(block);
}
};
if (this.getDescendants().length > this.workspace.remainingCapacity()) {
duplicateOption.enabled = false;
}
menuOptions.push(duplicateOption);
if (this.isEditable() && this.workspace.options.comments) {
// Option to add/remove a comment.
var commentOption = {enabled: !goog.userAgent.IE};
if (this.comment) {
commentOption.text = Blockly.Msg.REMOVE_COMMENT;
commentOption.callback = function() {
block.setCommentText(null);
};
} else {
commentOption.text = Blockly.Msg.ADD_COMMENT;
commentOption.callback = function() {
block.setCommentText('');
};
}
menuOptions.push(commentOption);
}
// Option to delete this block.
// Count the number of blocks that are nested in this block.
var descendantCount = this.getDescendants(true).length;
var nextBlock = this.getNextBlock();
if (nextBlock) {
// Blocks in the current stack would survive this block's deletion.
descendantCount -= nextBlock.getDescendants(true).length;
}
var deleteOption = {
text: descendantCount == 1 ? Blockly.Msg.DELETE_BLOCK :
Blockly.Msg.DELETE_X_BLOCKS.replace('%1', String(descendantCount)),
enabled: true,
callback: function() {
Blockly.Events.setGroup(true);
block.dispose(true, true);
Blockly.Events.setGroup(false);
}
};
menuOptions.push(deleteOption);
}
// Option to get help.
var url = goog.isFunction(this.helpUrl) ? this.helpUrl() : this.helpUrl;
var helpOption = {enabled: !!url};
helpOption.text = Blockly.Msg.HELP;
helpOption.callback = function() {
block.showHelp_();
};
menuOptions.push(helpOption);
// Allow the block to add or modify menuOptions.
if (this.customContextMenu && !block.isInFlyout) {
this.customContextMenu(menuOptions);
}
Blockly.ContextMenu.show(e, menuOptions, this.RTL);
Blockly.ContextMenu.currentBlock = this;
};
/**
* Move the connections for this block and all blocks attached under it.
* Also update any attached bubbles.
* @param {number} dx Horizontal offset from current location.
* @param {number} dy Vertical offset from current location.
* @private
*/
Blockly.BlockSvg.prototype.moveConnections_ = function(dx, dy) {
if (!this.rendered) {
// Rendering is required to lay out the blocks.
// This is probably an invisible block attached to a collapsed block.
return;
}
var myConnections = this.getConnections_(false);
for (var i = 0; i < myConnections.length; i++) {
myConnections[i].moveBy(dx, dy);
}
var icons = this.getIcons();
for (i = 0; i < icons.length; i++) {
icons[i].computeIconLocation();
}
// Recurse through all blocks attached under this one.
for (i = 0; i < this.childBlocks_.length; i++) {
this.childBlocks_[i].moveConnections_(dx, dy);
}
};
/**
* Recursively adds or removes the dragging class to this node and its children.
* @param {boolean} adding True if adding, false if removing.
* @private
*/
Blockly.BlockSvg.prototype.setDragging_ = function(adding) {
if (adding) {
this.addDragging();
Blockly.draggingConnections_ =
Blockly.draggingConnections_.concat(this.getConnections_(true));
} else {
this.removeDragging();
Blockly.draggingConnections_ = [];
}
// Recurse through all blocks attached under this one.
for (var i = 0; i < this.childBlocks_.length; i++) {
this.childBlocks_[i].setDragging_(adding);
}
};
/**
* Move this block to its workspace's drag surface, accounting for positioning.
* Generally should be called at the same time as setDragging_(true).
* @private
*/
Blockly.BlockSvg.prototype.moveToDragSurface_ = function() {
  // The translation for drag surface blocks is equal to the current
  // relative-to-surface position, to keep the position in sync as the block
  // moves on/off the surface.
var xy = this.getRelativeToSurfaceXY();
this.clearTransformAttributes_();
this.workspace.dragSurface.translateSurface(xy.x, xy.y);
// Execute the move on the top-level SVG component
this.workspace.dragSurface.setBlocksAndShow(this.getSvgRoot());
};
/**
* Move this block back to the workspace block canvas.
* Generally should be called at the same time as setDragging_(false).
* @private
*/
Blockly.BlockSvg.prototype.moveOffDragSurface_ = function() {
// Translate to current position, turning off 3d.
var xy = this.getRelativeToSurfaceXY();
this.clearTransformAttributes_();
this.translate(xy.x, xy.y, false);
this.workspace.dragSurface.clearAndHide(this.workspace.getCanvas());
};
/**
* Clear the block of style="..." and transform="..." attributes.
* Used when the block is switching from 3d to 2d transform or vice versa.
* @private
*/
Blockly.BlockSvg.prototype.clearTransformAttributes_ = function() {
if (this.getSvgRoot().hasAttribute('transform')) {
this.getSvgRoot().removeAttribute('transform');
}
if (this.getSvgRoot().hasAttribute('style')) {
this.getSvgRoot().removeAttribute('style');
}
};
/**
* Drag this block to follow the mouse.
* @param {!Event} e Mouse move event.
* @private
*/
Blockly.BlockSvg.prototype.onMouseMove_ = function(e) {
if (e.type == 'mousemove' && e.clientX <= 1 && e.clientY == 0 &&
e.button == 0) {
/* HACK:
Safari Mobile 6.0 and Chrome for Android 18.0 fire rogue mousemove
events on certain touch actions. Ignore events with these signatures.
This may result in a one-pixel blind spot in other browsers,
but this shouldn't be noticeable. */
e.stopPropagation();
return;
}
var oldXY = this.getRelativeToSurfaceXY();
var newXY = this.workspace.moveDrag(e);
if (Blockly.dragMode_ == Blockly.DRAG_STICKY) {
// Still dragging within the sticky DRAG_RADIUS.
var dr = goog.math.Coordinate.distance(oldXY, newXY) * this.workspace.scale;
if (dr > Blockly.DRAG_RADIUS) {
// Switch to unrestricted dragging.
Blockly.dragMode_ = Blockly.DRAG_FREE;
Blockly.longStop_();
// Must move to drag surface before unplug(),
// or else connections will calculate the wrong relative to surface XY
// in tighten_(). Then blocks connected to this block move around on the
// drag surface. By moving to the drag surface before unplug, connection
// positions will be calculated correctly.
this.moveToDragSurface_();
// Clear WidgetDiv/DropDownDiv without animating, in case blocks are moved
// around
Blockly.WidgetDiv.hide(true);
Blockly.DropDownDiv.hideWithoutAnimation();
if (this.parentBlock_) {
// Push this block to the very top of the stack.
this.unplug();
}
this.setDragging_(true);
}
}
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
this.handleDragFree_(oldXY, newXY, e);
}
// This event has been handled. No need to bubble up to the document.
e.stopPropagation();
e.preventDefault();
};
/**
* Handle a mouse movement when a block is already freely dragging.
* @param {!goog.math.Coordinate} oldXY The position of the block on screen
* before the most recent mouse movement.
* @param {!goog.math.Coordinate} newXY The new location after applying the
* mouse movement.
* @param {!Event} e Mouse move event.
* @private
*/
Blockly.BlockSvg.prototype.handleDragFree_ = function(oldXY, newXY, e) {
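  // How far the block has moved since the drag started.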
var dxy = goog.math.Coordinate.difference(oldXY, this.dragStartXY_);
this.workspace.dragSurface.translateSurface(newXY.x, newXY.y);
// Drag all the nested bubbles.
for (var i = 0; i < this.draggedBubbles_.length; i++) {
var commentData = this.draggedBubbles_[i];
commentData.bubble.setIconLocation(
goog.math.Coordinate.sum(commentData, dxy));
}
// Check to see if any of this block's connections are within range of
// another block's connection.
var myConnections = this.getConnections_(false);
// Also check the last connection on this stack
var lastOnStack = this.lastConnectionInStack();
if (lastOnStack && lastOnStack != this.nextConnection) {
myConnections.push(lastOnStack);
}
var closestConnection = null;
var localConnection = null;
var radiusConnection = Blockly.SNAP_RADIUS;
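  // Search within SNAP_RADIUS; every closer match shrinks the radius, so the
  // loop ends with the nearest compatible pair of connections.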
for (i = 0; i < myConnections.length; i++) {
var myConnection = myConnections[i];
var neighbour = myConnection.closest(radiusConnection, dxy);
if (neighbour.connection) {
closestConnection = neighbour.connection;
localConnection = myConnection;
radiusConnection = neighbour.radius;
}
}
var updatePreviews = true;
if (Blockly.localConnection_ && Blockly.highlightedConnection_) {
var xDiff = Blockly.localConnection_.x_ + dxy.x -
Blockly.highlightedConnection_.x_;
var yDiff = Blockly.localConnection_.y_ + dxy.y -
Blockly.highlightedConnection_.y_;
var curDistance = Math.sqrt(xDiff * xDiff + yDiff * yDiff);
// Slightly prefer the existing preview over a new preview.
if (closestConnection && radiusConnection > curDistance -
Blockly.CURRENT_CONNECTION_PREFERENCE) {
updatePreviews = false;
}
}
if (updatePreviews) {
var candidateIsLast = (localConnection == lastOnStack);
this.updatePreviews(closestConnection, localConnection, radiusConnection,
e, newXY.x - this.dragStartXY_.x, newXY.y - this.dragStartXY_.y,
candidateIsLast);
}
};
/**
* Preview the results of the drag if the mouse is released immediately.
* @param {Blockly.Connection} closestConnection The closest connection found
* during the search
* @param {Blockly.Connection} localConnection The connection on the moving
* block.
* @param {number} radiusConnection The distance between closestConnection and
* localConnection.
* @param {!Event} e Mouse move event.
* @param {number} dx The x distance the block has moved onscreen up to this
* point in the drag.
* @param {number} dy The y distance the block has moved onscreen up to this
* point in the drag.
* @param {boolean} candidateIsLast True if the dragging stack is more than one
* block long and localConnection is the last connection on the stack.
*/
Blockly.BlockSvg.prototype.updatePreviews = function(closestConnection,
localConnection, radiusConnection, e, dx, dy, candidateIsLast) {
// Don't fire events for insertion marker creation or movement.
Blockly.Events.disable();
// Remove an insertion marker if needed. For Scratch-Blockly we are using
// grayed-out blocks instead of highlighting the connection; for compatibility
// with Web Blockly the name "highlightedConnection" will still be used.
if (Blockly.highlightedConnection_ &&
Blockly.highlightedConnection_ != closestConnection) {
if (Blockly.replacementMarker_) {
Blockly.BlockSvg.removeReplacementMarker();
} else if (Blockly.insertionMarker_ && Blockly.insertionMarkerConnection_) {
Blockly.BlockSvg.disconnectInsertionMarker();
}
// If there's already an insertion marker but it's representing the wrong
// block, delete it so we can create the correct one.
if (Blockly.insertionMarker_ &&
((candidateIsLast && Blockly.localConnection_.sourceBlock_ == this) ||
(!candidateIsLast && Blockly.localConnection_.sourceBlock_ != this))) {
Blockly.insertionMarker_.dispose();
Blockly.insertionMarker_ = null;
}
Blockly.highlightedConnection_ = null;
Blockly.localConnection_ = null;
}
// Add an insertion marker or replacement marker if needed.
if (closestConnection &&
closestConnection != Blockly.highlightedConnection_ &&
!closestConnection.sourceBlock_.isInsertionMarker()) {
Blockly.highlightedConnection_ = closestConnection;
Blockly.localConnection_ = localConnection;
    // Dragging a block over an existing block in an input should replace the
// existing block and bump it out. Similarly, dragging a terminal block
// over another (connected) terminal block will replace, not insert.
var shouldReplace = (localConnection.type == Blockly.OUTPUT_VALUE ||
(localConnection.type == Blockly.PREVIOUS_STATEMENT &&
closestConnection.isConnected() &&
!this.nextConnection));
if (shouldReplace) {
this.addReplacementMarker_(localConnection, closestConnection);
} else { // Should insert
this.connectInsertionMarker_(localConnection, closestConnection);
}
}
// Reenable events.
Blockly.Events.enable();
// Provide visual indication of whether the block will be deleted if
// dropped here.
if (this.isDeletable()) {
this.workspace.isDeleteArea(e);
}
};
/**
* Add highlighting showing which block will be replaced.
* @param {Blockly.Connection} localConnection The connection on the dragging
* block.
 * @param {Blockly.Connection} closestConnection The connection to pretend to
* connect to.
*/
Blockly.BlockSvg.prototype.addReplacementMarker_ = function(localConnection,
closestConnection) {
if (closestConnection.targetBlock()) {
Blockly.replacementMarker_ = closestConnection.targetBlock();
Blockly.replacementMarker_.highlightForReplacement(true);
  } else if (localConnection.type == Blockly.OUTPUT_VALUE) {
Blockly.replacementMarker_ = closestConnection.sourceBlock_;
Blockly.replacementMarker_.highlightShapeForInput(closestConnection,
true);
}
};
/**
* Get rid of the highlighting marking the block that will be replaced.
*/
Blockly.BlockSvg.removeReplacementMarker = function() {
// If there's no block in place, but we're still connecting to a value input,
// then we must be highlighting an input shape.
if (Blockly.highlightedConnection_.type == Blockly.INPUT_VALUE &&
!Blockly.highlightedConnection_.isConnected()) {
Blockly.replacementMarker_.highlightShapeForInput(
Blockly.highlightedConnection_, false);
} else {
Blockly.replacementMarker_.highlightForReplacement(false);
}
Blockly.replacementMarker_ = null;
};
/**
* Place and render an insertion marker to indicate what would happen if you
* release the drag right now.
* @param {Blockly.Connection} localConnection The connection on the dragging
* block.
 * @param {Blockly.Connection} closestConnection The connection to connect the
* insertion marker to.
*/
Blockly.BlockSvg.prototype.connectInsertionMarker_ = function(localConnection,
closestConnection) {
if (!Blockly.insertionMarker_) {
Blockly.insertionMarker_ =
this.workspace.newBlock(Blockly.localConnection_.sourceBlock_.type);
Blockly.insertionMarker_.setInsertionMarker(true);
Blockly.insertionMarker_.initSvg();
}
var insertionMarker = Blockly.insertionMarker_;
var insertionMarkerConnection = insertionMarker.getMatchingConnection(
localConnection.sourceBlock_, localConnection);
if (insertionMarkerConnection != Blockly.insertionMarkerConnection_) {
insertionMarker.rendered = true;
// Render disconnected from everything else so that we have a valid
// connection location.
insertionMarker.render();
insertionMarker.getSvgRoot().setAttribute('visibility', 'visible');
this.positionNewBlock(insertionMarker,
insertionMarkerConnection, closestConnection);
if (insertionMarkerConnection.type == Blockly.PREVIOUS_STATEMENT &&
!insertionMarker.nextConnection) {
Blockly.bumpedConnection_ = closestConnection.targetConnection;
}
// Renders insertion marker.
insertionMarkerConnection.connect(closestConnection);
Blockly.insertionMarkerConnection_ = insertionMarkerConnection;
}
};
/**
* Disconnect the current insertion marker from the stack, and heal the stack to
* its previous state.
*/
Blockly.BlockSvg.disconnectInsertionMarker = function() {
// The insertion marker is the first block in a stack, either because it
// doesn't have a previous connection or because the previous connection is
// not connected. Unplug won't do anything in that case. Instead, unplug the
// following block.
if (Blockly.insertionMarkerConnection_ ==
Blockly.insertionMarker_.nextConnection &&
(!Blockly.insertionMarker_.previousConnection ||
!Blockly.insertionMarker_.previousConnection.targetConnection)) {
Blockly.insertionMarkerConnection_.targetBlock().unplug(false);
}
// Inside of a C-block, first statement connection.
else if (Blockly.insertionMarkerConnection_.type == Blockly.NEXT_STATEMENT &&
Blockly.insertionMarkerConnection_ !=
Blockly.insertionMarker_.nextConnection) {
var innerConnection = Blockly.insertionMarkerConnection_.targetConnection;
innerConnection.sourceBlock_.unplug(false);
var previousBlockNextConnection =
Blockly.insertionMarker_.previousConnection.targetConnection;
Blockly.insertionMarker_.unplug(true);
if (previousBlockNextConnection) {
previousBlockNextConnection.connect(innerConnection);
}
}
else {
Blockly.insertionMarker_.unplug(true /* healStack */);
}
if (Blockly.insertionMarkerConnection_.targetConnection) {
throw 'insertionMarkerConnection still connected at the end of disconnectInsertionMarker';
}
Blockly.insertionMarkerConnection_ = null;
Blockly.insertionMarker_.getSvgRoot().setAttribute('visibility', 'hidden');
};
/**
* Add or remove the UI indicating if this block is movable or not.
*/
Blockly.BlockSvg.prototype.updateMovable = function() {
if (this.isMovable()) {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDraggable');
} else {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDraggable');
}
};
/**
* Set whether this block is movable or not.
* @param {boolean} movable True if movable.
*/
Blockly.BlockSvg.prototype.setMovable = function(movable) {
Blockly.BlockSvg.superClass_.setMovable.call(this, movable);
this.updateMovable();
};
/**
* Set whether this block is editable or not.
* @param {boolean} editable True if editable.
*/
Blockly.BlockSvg.prototype.setEditable = function(editable) {
Blockly.BlockSvg.superClass_.setEditable.call(this, editable);
if (this.rendered) {
var icons = this.getIcons();
for (var i = 0; i < icons.length; i++) {
icons[i].updateEditable();
}
}
};
/**
* Set whether this block is a shadow block or not.
* @param {boolean} shadow True if a shadow.
*/
Blockly.BlockSvg.prototype.setShadow = function(shadow) {
Blockly.BlockSvg.superClass_.setShadow.call(this, shadow);
this.updateColour();
};
/**
* Set whether this block is an insertion marker block or not.
* @param {boolean} insertionMarker True if an insertion marker.
*/
Blockly.BlockSvg.prototype.setInsertionMarker = function(insertionMarker) {
Blockly.BlockSvg.superClass_.setInsertionMarker.call(this, insertionMarker);
this.updateColour();
};
/**
* Return the root node of the SVG or null if none exists.
* @return {Element} The root SVG node (probably a group).
*/
Blockly.BlockSvg.prototype.getSvgRoot = function() {
return this.svgGroup_;
};
/**
* Dispose of this block.
* @param {boolean} healStack If true, then try to heal any gap by connecting
* the next statement with the previous statement. Otherwise, dispose of
* all children of this block.
* @param {boolean} animate If true, show a disposal animation and sound.
*/
Blockly.BlockSvg.prototype.dispose = function(healStack, animate) {
Blockly.Tooltip.hide();
Blockly.Field.startCache();
// If this block is being dragged, unlink the mouse events.
if (Blockly.selected == this) {
this.unselect();
Blockly.terminateDrag_();
}
// If this block has a context menu open, close it.
if (Blockly.ContextMenu.currentBlock == this) {
Blockly.ContextMenu.hide();
}
if (animate && this.rendered) {
this.unplug(healStack);
this.disposeUiEffect();
}
// Stop rerendering.
this.rendered = false;
Blockly.Events.disable();
var icons = this.getIcons();
for (var i = 0; i < icons.length; i++) {
icons[i].dispose();
}
Blockly.Events.enable();
Blockly.BlockSvg.superClass_.dispose.call(this, healStack);
goog.dom.removeNode(this.svgGroup_);
// Sever JavaScript to DOM connections.
this.svgGroup_ = null;
this.svgPath_ = null;
Blockly.Field.stopCache();
};
/**
* Play some UI effects (sound, animation) when disposing of a block.
*/
Blockly.BlockSvg.prototype.disposeUiEffect = function() {
this.workspace.playAudio('delete');
var xy = Blockly.getSvgXY_(/** @type {!Element} */ (this.svgGroup_),
this.workspace);
// Deeply clone the current block.
var clone = this.svgGroup_.cloneNode(true);
clone.translateX_ = xy.x;
clone.translateY_ = xy.y;
clone.setAttribute('transform',
'translate(' + clone.translateX_ + ',' + clone.translateY_ + ')');
this.workspace.getParentSvg().appendChild(clone);
clone.bBox_ = clone.getBBox();
// Start the animation.
Blockly.BlockSvg.disposeUiStep_(clone, this.RTL, new Date(),
this.workspace.scale);
};
/**
* Play some UI effects (sound) after a connection has been established.
*/
Blockly.BlockSvg.prototype.connectionUiEffect = function() {
this.workspace.playAudio('click');
};
/**
* Animate a cloned block and eventually dispose of it.
 * This is a class method, not an instance method since the original block has
* been destroyed and is no longer accessible.
* @param {!Element} clone SVG element to animate and dispose of.
* @param {boolean} rtl True if RTL, false if LTR.
* @param {!Date} start Date of animation's start.
* @param {number} workspaceScale Scale of workspace.
* @private
*/
Blockly.BlockSvg.disposeUiStep_ = function(clone, rtl, start, workspaceScale) {
var ms = (new Date()) - start;
var percent = ms / 150;
if (percent > 1) {
goog.dom.removeNode(clone);
} else {
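    // The animation lasts 150ms: slide the clone downwards (and sideways,
    // direction depending on RTL) while scaling it down to nothing.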
var x = clone.translateX_ +
(rtl ? -1 : 1) * clone.bBox_.width * workspaceScale / 2 * percent;
var y = clone.translateY_ + clone.bBox_.height * workspaceScale * percent;
var scale = (1 - percent) * workspaceScale;
clone.setAttribute('transform', 'translate(' + x + ',' + y + ')' +
' scale(' + scale + ')');
var closure = function() {
Blockly.BlockSvg.disposeUiStep_(clone, rtl, start, workspaceScale);
};
setTimeout(closure, 10);
}
};
/**
* Enable or disable a block.
*/
Blockly.BlockSvg.prototype.updateDisabled = function() {
// not supported
};
/**
* Returns the comment on this block (or '' if none).
* @return {string} Block's comment.
*/
Blockly.BlockSvg.prototype.getCommentText = function() {
if (this.comment) {
var comment = this.comment.getText();
// Trim off trailing whitespace.
return comment.replace(/\s+$/, '').replace(/ +\n/g, '\n');
}
return '';
};
/**
* Set this block's comment text.
* @param {?string} text The text, or null to delete.
*/
Blockly.BlockSvg.prototype.setCommentText = function(text) {
var changedState = false;
if (goog.isString(text)) {
if (!this.comment) {
this.comment = new Blockly.Comment(this);
changedState = true;
}
this.comment.setText(/** @type {string} */ (text));
} else {
if (this.comment) {
this.comment.dispose();
changedState = true;
}
}
if (changedState && this.rendered) {
this.render();
// Adding or removing a comment icon will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Set this block's warning text.
* @param {?string} text The text, or null to delete.
* @param {string=} opt_id An optional ID for the warning text to be able to
* maintain multiple warnings.
*/
Blockly.BlockSvg.prototype.setWarningText = function(text, opt_id) {
if (!this.setWarningText.pid_) {
// Create a database of warning PIDs.
// Only runs once per block (and only those with warnings).
this.setWarningText.pid_ = Object.create(null);
}
var id = opt_id || '';
if (!id) {
    // Kill all previous pending processes; this edit supersedes them all.
for (var n in this.setWarningText.pid_) {
clearTimeout(this.setWarningText.pid_[n]);
delete this.setWarningText.pid_[n];
}
} else if (this.setWarningText.pid_[id]) {
// Only queue up the latest change. Kill any earlier pending process.
clearTimeout(this.setWarningText.pid_[id]);
delete this.setWarningText.pid_[id];
}
if (Blockly.dragMode_ == Blockly.DRAG_FREE) {
// Don't change the warning text during a drag.
// Wait until the drag finishes.
var thisBlock = this;
this.setWarningText.pid_[id] = setTimeout(function() {
if (thisBlock.workspace) { // Check block wasn't deleted.
delete thisBlock.setWarningText.pid_[id];
thisBlock.setWarningText(text, id);
}
}, 100);
return;
}
if (this.isInFlyout) {
text = null;
}
var changedState = false;
if (goog.isString(text)) {
if (!this.warning) {
this.warning = new Blockly.Warning(this);
changedState = true;
}
this.warning.setText(/** @type {string} */ (text), id);
} else {
// Dispose all warnings if no id is given.
if (this.warning && !id) {
this.warning.dispose();
changedState = true;
} else if (this.warning) {
var oldText = this.warning.getText();
this.warning.setText('', id);
var newText = this.warning.getText();
if (!newText) {
this.warning.dispose();
}
      changedState = oldText != newText;
}
}
if (changedState && this.rendered) {
this.render();
// Adding or removing a warning icon will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Give this block a mutator dialog.
* @param {Blockly.Mutator} mutator A mutator dialog instance or null to remove.
*/
Blockly.BlockSvg.prototype.setMutator = function(mutator) {
if (this.mutator && this.mutator !== mutator) {
this.mutator.dispose();
}
if (mutator) {
mutator.block_ = this;
this.mutator = mutator;
mutator.createIcon();
}
};
/**
* Select this block. Highlight it visually.
*/
Blockly.BlockSvg.prototype.addSelect = function() {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklySelected');
// Move the selected block to the top of the stack.
this.svgGroup_.parentNode.appendChild(this.svgGroup_);
};
/**
* Unselect this block. Remove its highlighting.
*/
Blockly.BlockSvg.prototype.removeSelect = function() {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklySelected');
};
/**
* Adds the dragging class to this block.
*/
Blockly.BlockSvg.prototype.addDragging = function() {
Blockly.addClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDragging');
};
/**
* Removes the dragging class from this block.
*/
Blockly.BlockSvg.prototype.removeDragging = function() {
Blockly.removeClass_(/** @type {!Element} */ (this.svgGroup_),
'blocklyDragging');
};
// Overrides of functions on Blockly.Block that take into account whether the
// block has been rendered.
/**
* Change the colour of a block.
* @param {number|string} colour HSV hue value, or #RRGGBB string.
* @param {number|string} colourSecondary Secondary HSV hue value, or #RRGGBB
* string.
* @param {number|string} colourTertiary Tertiary HSV hue value, or #RRGGBB
* string.
*/
Blockly.BlockSvg.prototype.setColour = function(colour, colourSecondary,
colourTertiary) {
Blockly.BlockSvg.superClass_.setColour.call(this, colour, colourSecondary,
colourTertiary);
if (this.rendered) {
this.updateColour();
}
};
/**
* Set whether this block can chain onto the bottom of another block.
* @param {boolean} newBoolean True if there can be a previous statement.
* @param {string|Array.<string>|null|undefined} opt_check Statement type or
* list of statement types. Null/undefined if any type could be connected.
*/
Blockly.BlockSvg.prototype.setPreviousStatement =
function(newBoolean, opt_check) {
/* eslint-disable indent */
Blockly.BlockSvg.superClass_.setPreviousStatement.call(this, newBoolean,
opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
}; /* eslint-enable indent */
/**
* Set whether another block can chain onto the bottom of this block.
* @param {boolean} newBoolean True if there can be a next statement.
* @param {string|Array.<string>|null|undefined} opt_check Statement type or
* list of statement types. Null/undefined if any type could be connected.
*/
Blockly.BlockSvg.prototype.setNextStatement = function(newBoolean, opt_check) {
Blockly.BlockSvg.superClass_.setNextStatement.call(this, newBoolean,
opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Set whether this block returns a value.
* @param {boolean} newBoolean True if there is an output.
* @param {string|Array.<string>|null|undefined} opt_check Returned type or list
* of returned types. Null or undefined if any type could be returned
* (e.g. variable get).
*/
Blockly.BlockSvg.prototype.setOutput = function(newBoolean, opt_check) {
Blockly.BlockSvg.superClass_.setOutput.call(this, newBoolean, opt_check);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Set whether value inputs are arranged horizontally or vertically.
* @param {boolean} newBoolean True if inputs are horizontal.
*/
Blockly.BlockSvg.prototype.setInputsInline = function(newBoolean) {
Blockly.BlockSvg.superClass_.setInputsInline.call(this, newBoolean);
if (this.rendered) {
this.render();
this.bumpNeighbours_();
}
};
/**
* Remove an input from this block.
* @param {string} name The name of the input.
* @param {boolean=} opt_quiet True to prevent error if input is not present.
* @throws {goog.asserts.AssertionError} if the input is not present and
* opt_quiet is not true.
*/
Blockly.BlockSvg.prototype.removeInput = function(name, opt_quiet) {
Blockly.BlockSvg.superClass_.removeInput.call(this, name, opt_quiet);
if (this.rendered) {
this.render();
// Removing an input will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Move a numbered input to a different location on this block.
* @param {number} inputIndex Index of the input to move.
* @param {number} refIndex Index of input that should be after the moved input.
*/
Blockly.BlockSvg.prototype.moveNumberedInputBefore = function(
inputIndex, refIndex) {
Blockly.BlockSvg.superClass_.moveNumberedInputBefore.call(this, inputIndex,
refIndex);
if (this.rendered) {
this.render();
// Moving an input will cause the block to change shape.
this.bumpNeighbours_();
}
};
/**
* Add a value input, statement input or local variable to this block.
* @param {number} type Either Blockly.INPUT_VALUE or Blockly.NEXT_STATEMENT or
* Blockly.DUMMY_INPUT.
 * @param {string} name Language-neutral identifier which may be used to find
* input again. Should be unique to this block.
* @return {!Blockly.Input} The input object created.
* @private
*/
Blockly.BlockSvg.prototype.appendInput_ = function(type, name) {
var input = Blockly.BlockSvg.superClass_.appendInput_.call(this, type, name);
if (this.rendered) {
this.render();
// Adding an input will cause the block to change shape.
this.bumpNeighbours_();
}
return input;
};
/**
* Returns connections originating from this block.
* @param {boolean} all If true, return all connections even hidden ones.
* Otherwise, for a non-rendered block return an empty list, and for a
 *     collapsed block don't return input connections.
* @return {!Array.<!Blockly.Connection>} Array of connections.
* @private
*/
Blockly.BlockSvg.prototype.getConnections_ = function(all) {
var myConnections = [];
if (all || this.rendered) {
if (this.outputConnection) {
myConnections.push(this.outputConnection);
}
if (this.previousConnection) {
myConnections.push(this.previousConnection);
}
if (this.nextConnection) {
myConnections.push(this.nextConnection);
}
if (all || !this.collapsed_) {
for (var i = 0, input; input = this.inputList[i]; i++) {
if (input.connection) {
myConnections.push(input.connection);
}
}
}
}
return myConnections;
};
/**
* Create a connection of the specified type.
* @param {number} type The type of the connection to create.
* @return {!Blockly.RenderedConnection} A new connection of the specified type.
* @private
*/
Blockly.BlockSvg.prototype.makeConnection_ = function(type) {
return new Blockly.RenderedConnection(this, type);
};
| 1 | 7,872 | ...and in turn, this should probably be var updatePreviews = true; if (!(localConnection && localConnection.type == Blockly.OUTPUT_VALUE) && (Blockly.localConnection_ && Blockly.highlightedConnection_)) { since the first clause is a no-op. If you want to leave it this way for clarity, that's fine too. | LLK-scratch-blocks | js |
@@ -70,8 +70,7 @@ class BigqueryRulesEngine(bre.BaseRulesEngine):
self.rule_book = BigqueryRuleBook(self._load_rule_definitions())
# TODO: The naming is confusing and needs to be fixed in all scanners.
- def find_policy_violations(self, parent_project, bq_acl,
- force_rebuild=False):
+ def find_violations(self, parent_project, bq_acl, force_rebuild=False):
"""Determine whether Big Query datasets violate rules.
Args: | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules engine for Big Query data sets."""
import collections
import enum
import itertools
import re
from google.cloud.forseti.common.gcp_type import resource_util
from google.cloud.forseti.common.gcp_type import resource as resource_mod
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import regular_exp
from google.cloud.forseti.common.util import relationship
from google.cloud.forseti.scanner.audit import base_rules_engine as bre
from google.cloud.forseti.scanner.audit import errors as audit_errors
LOGGER = logger.get_logger(__name__)
class Mode(enum.Enum):
"""Rule modes."""
WHITELIST = 'whitelist'
BLACKLIST = 'blacklist'
# Rule definition wrappers.
# TODO: allow for multiple dataset ids.
RuleReference = collections.namedtuple(
'RuleReference', ['mode', 'dataset_ids', 'bindings'])
Binding = collections.namedtuple('Binding', ['role', 'members'])
Member = collections.namedtuple(
'Member', ['domain', 'group_email', 'user_email', 'special_group'],
)
class BigqueryRulesEngine(bre.BaseRulesEngine):
"""Rules engine for Big Query data sets"""
def __init__(self, rules_file_path, snapshot_timestamp=None):
"""Initialize.
Args:
rules_file_path (str): file location of rules
snapshot_timestamp (str): snapshot timestamp. Defaults to None.
If set, this will be the snapshot timestamp
used in the engine.
"""
super(BigqueryRulesEngine,
self).__init__(rules_file_path=rules_file_path)
self.rule_book = None
def build_rule_book(self, global_configs=None):
"""Build BigqueryRuleBook from the rules definition file.
Args:
global_configs (dict): Global configurations.
"""
self.rule_book = BigqueryRuleBook(self._load_rule_definitions())
# TODO: The naming is confusing and needs to be fixed in all scanners.
def find_policy_violations(self, parent_project, bq_acl,
force_rebuild=False):
"""Determine whether Big Query datasets violate rules.
Args:
parent_project (Project): parent project the acl belongs to.
bq_acl (BigqueryAccessControls): Object containing ACL data.
force_rebuild (bool): If True, rebuilds the rule book. This will
reload the rules definition file and add the rules to the book.
Returns:
generator: A generator of rule violations.
"""
if self.rule_book is None or force_rebuild:
self.build_rule_book()
violations = self.rule_book.find_policy_violations(
parent_project, bq_acl)
return violations
def add_rules(self, rules):
"""Add rules to the rule book.
Args:
rules (dict): rule definitions dictionary
"""
if self.rule_book is not None:
self.rule_book.add_rules(rules)
class BigqueryRuleBook(bre.BaseRuleBook):
"""The RuleBook for Big Query dataset resources."""
def __init__(self, rule_defs=None):
"""Initialization.
Args:
            rule_defs (dict): rule definitions dictionary.
"""
super(BigqueryRuleBook, self).__init__()
self.resource_rules_map = collections.defaultdict(list)
if not rule_defs:
self.rule_defs = {}
else:
self.rule_defs = rule_defs
self.add_rules(rule_defs)
def add_rules(self, rule_defs):
"""Add rules to the rule book.
Args:
rule_defs (dict): rule definitions dictionary.
"""
for (i, rule) in enumerate(rule_defs.get('rules', [])):
self.add_rule(rule, i)
@classmethod
def _build_rule(cls, rule_def, rule_index):
"""Build a rule.
Args:
rule_def (dict): A dictionary containing rule definition
properties.
rule_index (int): The index of the rule from the rule definitions.
Assigned automatically when the rule book is built.
Returns:
Rule: rule for the given definition.
"""
dataset_ids = []
for dataset_id in rule_def.get('dataset_ids', []):
dataset_ids.append(regular_exp.escape_and_globify(dataset_id))
# Check `dataset_id` for backwards compatibility.
# TODO: stop supporting this.
if 'dataset_id' in rule_def:
dataset_ids.append(
regular_exp.escape_and_globify(rule_def['dataset_id'])
)
if not dataset_ids:
raise audit_errors.InvalidRulesSchemaError(
'Missing dataset_ids in rule {}'.format(rule_index))
bindings = []
# TODO: stop supporting this.
binding = cls._get_binding_from_old_syntax(rule_def)
if binding:
bindings.append(binding)
# Default mode to blacklist for backwards compatibility as that was
# the behaviour before mode was configurable.
# TODO: make mode required?
mode = Mode(rule_def.get('mode', 'blacklist'))
for raw_binding in rule_def.get('bindings', []):
if 'role' not in raw_binding:
raise audit_errors.InvalidRulesSchemaError(
'Missing role in binding in rule {}'.format(rule_index))
role = regular_exp.escape_and_globify(raw_binding['role'])
if 'members' not in raw_binding:
raise audit_errors.InvalidRulesSchemaError(
'Missing members in binding in rule {}'.format(rule_index))
members = []
for raw_member in raw_binding['members']:
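                # Globify each recognized member field; fields not set in the
                # config remain None.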
fields = {
field: regular_exp.escape_and_globify(raw_member.get(field))
for field in [
'domain', 'group_email', 'user_email', 'special_group'
]
}
# only one key should be set per member
num_fields_set = sum(
[val is not None for val in fields.values()]
)
if num_fields_set != 1:
raise audit_errors.InvalidRulesSchemaError(
'At most one member field may be set in rule {}'.format(
rule_index))
members.append(Member(**fields))
bindings.append(Binding(role, members))
if not bindings:
raise audit_errors.InvalidRulesSchemaError(
'Missing bindings in rule {}'.format(rule_index))
return Rule(rule_name=rule_def.get('name'),
rule_index=rule_index,
rule_reference=RuleReference(
dataset_ids=dataset_ids,
bindings=bindings,
mode=mode))
@classmethod
def _get_binding_from_old_syntax(cls, rule_def):
"""Get a binding for configs set with the old syntax.
        Fields default to a glob pattern ('*'), since that is how these fields
        used to be set.
Args:
rule_def (dict): raw rule definition.
Returns:
Binding: If an old style config field is set, returns a single binding
with a single member.
"""
keys = ['role', 'domain', 'group_email', 'user_email', 'special_group']
for key in keys:
if key in rule_def:
return Binding(
role=regular_exp.escape_and_globify(
rule_def.get('role', '*')),
members=[Member(
regular_exp.escape_and_globify(
rule_def.get('domain', '*')),
regular_exp.escape_and_globify(
rule_def.get('group_email', '*')),
regular_exp.escape_and_globify(
rule_def.get('user_email', '*')),
regular_exp.escape_and_globify(
rule_def.get('special_group', '*')),
)]
)
return None
def add_rule(self, rule_def, rule_index):
"""Add a rule to the rule book.
Args:
rule_def (dict): A dictionary containing rule definition
properties.
rule_index (int): The index of the rule from the rule definitions.
Assigned automatically when the rule book is built.
"""
resources = rule_def.get('resource')
for raw_resource in resources:
resource_ids = raw_resource.get('resource_ids')
if not resource_ids or len(resource_ids) < 1:
raise audit_errors.InvalidRulesSchemaError(
'Missing resource ids in rule {}'.format(rule_index))
rule = self._build_rule(rule_def, rule_index)
resource_type = raw_resource.get('type')
for resource_id in resource_ids:
resource = resource_util.create_resource(
resource_id=resource_id,
resource_type=resource_type,
)
self.resource_rules_map[resource].append(rule)
def find_policy_violations(self, resource, bq_acl):
"""Find acl violations in the rule book.
Args:
resource (gcp_type): The GCP resource associated with the acl.
This is where we start looking for rule violations and
we move up the resource hierarchy (if permitted by the
resource's "inherit_from_parents" property).
bq_acl (BigqueryAccessControls): The acl to compare the rules
against.
Returns:
iterable: A generator of the rule violations.
"""
violations = itertools.chain()
resource_ancestors = (
relationship.find_ancestors(resource, resource.full_name))
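        # Chain together violations from rules bound at each level of the
        # resource hierarchy.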
for res in resource_ancestors:
for rule in self.resource_rules_map.get(res, []):
violations = itertools.chain(
violations, rule.find_policy_violations(bq_acl))
return violations
class Rule(object):
"""Rule properties from the rule definition file.
Also finds violations.
"""
rule_violation_attributes = ['resource_type', 'resource_id',
'resource_name', 'full_name', 'rule_name',
'rule_index', 'violation_type', 'dataset_id',
'role', 'special_group', 'user_email',
'domain', 'group_email', 'view',
'resource_data']
frozen_rule_attributes = frozenset(rule_violation_attributes)
RuleViolation = collections.namedtuple(
'RuleViolation',
frozen_rule_attributes)
def __init__(self, rule_name, rule_index, rule_reference):
"""Initialize.
Args:
rule_name (str): Name of the loaded rule.
rule_index (int): The index of the rule from the rule definitions.
rule_reference (RuleReference): The rules from the file and
corresponding values.
"""
self.rule_name = rule_name
self.rule_index = rule_index
self.rule_reference = rule_reference
# TODO: The naming is confusing and needs to be fixed in all scanners.
def find_policy_violations(self, bigquery_acl):
"""Find BigQuery acl violations in the rule book.
Args:
bigquery_acl (BigqueryAccessControls): BigQuery ACL resource.
Yields:
namedtuple: Returns RuleViolation named tuple.
"""
matches = []
has_applicable_rules = False
for binding in self.rule_reference.bindings:
if not self._is_binding_applicable(binding, bigquery_acl):
continue
has_applicable_rules = True
for member in binding.members:
rule_regex_and_vals = [
(member.domain, bigquery_acl.domain),
(member.user_email, bigquery_acl.user_email),
(member.group_email, bigquery_acl.group_email),
(member.special_group, bigquery_acl.special_group),
]
# Note: bindings should only have 1 member field set, so at most
# one of the regex value pairs should be non-None. However,
# old style configs had to set all fields, so for backwards
# compatibility we have to check all.
# TODO: Once we are no longer supporting backwards
# compatibility, just match the first non-None pair and break.
sub_matches = [
re.match(regex, val)
for regex, val in rule_regex_and_vals
if regex is not None and val is not None
]
if not sub_matches:
continue
matches.append(all(sub_matches))
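        # Blacklist mode: any matching member is a violation.
        # Whitelist mode: it is a violation only if no member matched.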
has_violation = (
self.rule_reference.mode == Mode.BLACKLIST and any(matches) or
self.rule_reference.mode == Mode.WHITELIST and not any(matches)
)
if has_applicable_rules and has_violation:
yield self.RuleViolation(
resource_name=bigquery_acl.dataset_id,
resource_type=resource_mod.ResourceType.BIGQUERY,
resource_id=bigquery_acl.dataset_id,
full_name=bigquery_acl.full_name,
rule_name=self.rule_name,
rule_index=self.rule_index,
violation_type='BIGQUERY_VIOLATION',
dataset_id=bigquery_acl.dataset_id,
role=bigquery_acl.role,
special_group=bigquery_acl.special_group or '',
user_email=bigquery_acl.user_email or '',
domain=bigquery_acl.domain or '',
group_email=bigquery_acl.group_email or '',
view=bigquery_acl.view,
resource_data=bigquery_acl.json,
)
def _is_binding_applicable(self, binding, bigquery_acl):
"""Determine whether the binding is applicable to the acl.
Args:
binding (Binding): rules binding to check against.
bigquery_acl (BigqueryAccessControls): BigQuery ACL resource.
Returns:
bool: True if the rules are applicable to the given acl, False
otherwise.
"""
# only one dataset needs to match, so union all dataset ids into one
# regex expression
dataset_ids_matched = re.match(
'|'.join(self.rule_reference.dataset_ids), bigquery_acl.dataset_id,
)
role_matched = re.match(binding.role, bigquery_acl.role)
return dataset_ids_matched and role_matched
| 1 | 32,462 | Please remove this TODO, since they will not apply anymore after you are done. :) Can you please remove this everywhere else in this PR? | forseti-security-forseti-security | py |
@@ -1667,6 +1667,13 @@ void ErrorTest() {
TestError("enum X:byte { Y, Y }", "value already");
TestError("enum X:byte { Y=2, Z=2 }", "unique");
TestError("table X { Y:int; } table X {", "datatype already");
+ TestError("table X { A:bool; } table Y { } union X {",
+ "enum clashes with datatype");
+ TestError("union X { X, Y } table X {", "datatype clashes with enum");
+ TestError("namespace A; table X { } namespace A; union X {",
+ "enum clashes with datatype");
+ TestError("namespace A; union X { } namespace A; table X {",
+ "datatype clashes with enum");
TestError("struct X (force_align: 7) { Y:int; }", "force_align");
TestError("struct X {}", "size 0");
TestError("{}", "no root"); | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/minireflect.h"
#include "flatbuffers/registry.h"
#include "flatbuffers/util.h"
// clang-format off
#ifdef FLATBUFFERS_CPP98_STL
namespace std {
using flatbuffers::unique_ptr;
}
#endif
// clang-format on
#include "monster_test_generated.h"
#include "namespace_test/namespace_test1_generated.h"
#include "namespace_test/namespace_test2_generated.h"
#include "union_vector/union_vector_generated.h"
#include "optional_scalars_generated.h"
#if !defined(_MSC_VER) || _MSC_VER >= 1700
# include "monster_extra_generated.h"
# include "arrays_test_generated.h"
# include "evolution_test/evolution_v1_generated.h"
# include "evolution_test/evolution_v2_generated.h"
#endif
#include "native_type_test_generated.h"
#include "test_assert.h"
#include "flatbuffers/flexbuffers.h"
#include "monster_test_bfbs_generated.h" // Generated using --bfbs-comments --bfbs-builtins --cpp --bfbs-gen-embed
// clang-format off
// Check that char* and uint8_t* are interoperable types.
// The reinterpret_cast<> between the pointers are used to simplify data loading.
static_assert(flatbuffers::is_same<uint8_t, char>::value ||
flatbuffers::is_same<uint8_t, unsigned char>::value,
"unexpected uint8_t type");
#if defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
// Ensure IEEE-754 support if tests of floats with NaN/Inf will run.
static_assert(std::numeric_limits<float>::is_iec559 &&
std::numeric_limits<double>::is_iec559,
"IEC-559 (IEEE-754) standard required");
#endif
// clang-format on
// Shortcuts for the infinity.
static const auto infinity_f = std::numeric_limits<float>::infinity();
static const auto infinity_d = std::numeric_limits<double>::infinity();
using namespace MyGame::Example;
void FlatBufferBuilderTest();
// Include simple random number generator to ensure results will be the
// same cross platform.
// http://en.wikipedia.org/wiki/Park%E2%80%93Miller_random_number_generator
uint32_t lcg_seed = 48271;
uint32_t lcg_rand() {
return lcg_seed =
(static_cast<uint64_t>(lcg_seed) * 279470273UL) % 4294967291UL;
}
void lcg_reset() { lcg_seed = 48271; }
std::string test_data_path =
#ifdef BAZEL_TEST_DATA_PATH
"../com_github_google_flatbuffers/tests/";
#else
"tests/";
#endif
// example of how to build up a serialized buffer algorithmically:
flatbuffers::DetachedBuffer CreateFlatBufferTest(std::string &buffer) {
flatbuffers::FlatBufferBuilder builder;
auto vec = Vec3(1, 2, 3, 0, Color_Red, Test(10, 20));
auto name = builder.CreateString("MyMonster");
unsigned char inv_data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
auto inventory = builder.CreateVector(inv_data, 10);
// Alternatively, create the vector first, and fill in data later:
// unsigned char *inv_buf = nullptr;
// auto inventory = builder.CreateUninitializedVector<unsigned char>(
// 10, &inv_buf);
// memcpy(inv_buf, inv_data, 10);
Test tests[] = { Test(10, 20), Test(30, 40) };
auto testv = builder.CreateVectorOfStructs(tests, 2);
// clang-format off
#ifndef FLATBUFFERS_CPP98_STL
// Create a vector of structures from a lambda.
auto testv2 = builder.CreateVectorOfStructs<Test>(
2, [&](size_t i, Test* s) -> void {
*s = tests[i];
});
#else
// Create a vector of structures using a plain old C++ function.
auto testv2 = builder.CreateVectorOfStructs<Test>(
2, [](size_t i, Test* s, void *state) -> void {
*s = (reinterpret_cast<Test*>(state))[i];
}, tests);
#endif // FLATBUFFERS_CPP98_STL
// clang-format on
// create monster with very few fields set:
// (same functionality as CreateMonster below, but sets fields manually)
flatbuffers::Offset<Monster> mlocs[3];
auto fred = builder.CreateString("Fred");
auto barney = builder.CreateString("Barney");
auto wilma = builder.CreateString("Wilma");
MonsterBuilder mb1(builder);
mb1.add_name(fred);
mlocs[0] = mb1.Finish();
MonsterBuilder mb2(builder);
mb2.add_name(barney);
mb2.add_hp(1000);
mlocs[1] = mb2.Finish();
MonsterBuilder mb3(builder);
mb3.add_name(wilma);
mlocs[2] = mb3.Finish();
// Create an array of strings. Also test string pooling, and lambdas.
auto vecofstrings =
builder.CreateVector<flatbuffers::Offset<flatbuffers::String>>(
4,
[](size_t i, flatbuffers::FlatBufferBuilder *b)
-> flatbuffers::Offset<flatbuffers::String> {
static const char *names[] = { "bob", "fred", "bob", "fred" };
return b->CreateSharedString(names[i]);
},
&builder);
// Creating vectors of strings in one convenient call.
std::vector<std::string> names2;
names2.push_back("jane");
names2.push_back("mary");
auto vecofstrings2 = builder.CreateVectorOfStrings(names2);
// Create an array of sorted tables, can be used with binary search when read:
auto vecoftables = builder.CreateVectorOfSortedTables(mlocs, 3);
// Create an array of sorted structs,
// can be used with binary search when read:
std::vector<Ability> abilities;
abilities.push_back(Ability(4, 40));
abilities.push_back(Ability(3, 30));
abilities.push_back(Ability(2, 20));
abilities.push_back(Ability(0, 0));
auto vecofstructs = builder.CreateVectorOfSortedStructs(&abilities);
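  // Create a one-element vector of sorted Stat tables, keyed on `count`.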
flatbuffers::Offset<Stat> mlocs_stats[1];
auto miss = builder.CreateString("miss");
StatBuilder mb_miss(builder);
mb_miss.add_id(miss);
mb_miss.add_val(0);
mb_miss.add_count(0); // key
mlocs_stats[0] = mb_miss.Finish();
auto vec_of_stats = builder.CreateVectorOfSortedTables(mlocs_stats, 1);
// Create a nested FlatBuffer.
// Nested FlatBuffers are stored in a ubyte vector, which can be convenient
  // since they can be memcpy'd around much more easily than other FlatBuffer
// values. They have little overhead compared to storing the table directly.
// As a test, create a mostly empty Monster buffer:
flatbuffers::FlatBufferBuilder nested_builder;
auto nmloc = CreateMonster(nested_builder, nullptr, 0, 0,
nested_builder.CreateString("NestedMonster"));
FinishMonsterBuffer(nested_builder, nmloc);
// Now we can store the buffer in the parent. Note that by default, vectors
// are only aligned to their elements or size field, so in this case if the
// buffer contains 64-bit elements, they may not be correctly aligned. We fix
// that with:
builder.ForceVectorAlignment(nested_builder.GetSize(), sizeof(uint8_t),
nested_builder.GetBufferMinAlignment());
// If for whatever reason you don't have the nested_builder available, you
// can substitute flatbuffers::largest_scalar_t (64-bit) for the alignment, or
// the largest force_align value in your schema if you're using it.
auto nested_flatbuffer_vector = builder.CreateVector(
nested_builder.GetBufferPointer(), nested_builder.GetSize());
// Test a nested FlexBuffer:
flexbuffers::Builder flexbuild;
flexbuild.Int(1234);
flexbuild.Finish();
auto flex = builder.CreateVector(flexbuild.GetBuffer());
// Test vector of enums.
Color colors[] = { Color_Blue, Color_Green };
// We use this special creation function because we have an array of
// pre-C++11 (enum class) enums whose size likely is int, yet its declared
// type in the schema is byte.
auto vecofcolors = builder.CreateVectorScalarCast<uint8_t, Color>(colors, 2);
// shortcut for creating monster with all fields set:
auto mloc = CreateMonster(
builder, &vec, 150, 80, name, inventory, Color_Blue, Any_Monster,
mlocs[1].Union(), // Store a union.
testv, vecofstrings, vecoftables, 0, nested_flatbuffer_vector, 0, false,
0, 0, 0, 0, 0, 0, 0, 0, 0, 3.14159f, 3.0f, 0.0f, vecofstrings2,
vecofstructs, flex, testv2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
AnyUniqueAliases_NONE, 0, AnyAmbiguousAliases_NONE, 0, vecofcolors,
MyGame::Example::Race_None, 0, vec_of_stats);
FinishMonsterBuffer(builder, mloc);
// clang-format off
#ifdef FLATBUFFERS_TEST_VERBOSE
// print byte data for debugging:
auto p = builder.GetBufferPointer();
for (flatbuffers::uoffset_t i = 0; i < builder.GetSize(); i++)
printf("%d ", p[i]);
#endif
// clang-format on
// return the buffer for the caller to use.
auto bufferpointer =
reinterpret_cast<const char *>(builder.GetBufferPointer());
buffer.assign(bufferpointer, bufferpointer + builder.GetSize());
return builder.Release();
}
// example of accessing a buffer loaded in memory:
void AccessFlatBufferTest(const uint8_t *flatbuf, size_t length,
bool pooled = true) {
  // First, verify the buffer's integrity (optional).
flatbuffers::Verifier verifier(flatbuf, length);
TEST_EQ(VerifyMonsterBuffer(verifier), true);
// clang-format off
#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
std::vector<uint8_t> test_buff;
test_buff.resize(length * 2);
std::memcpy(&test_buff[0], flatbuf, length);
std::memcpy(&test_buff[length], flatbuf, length);
flatbuffers::Verifier verifier1(&test_buff[0], length);
TEST_EQ(VerifyMonsterBuffer(verifier1), true);
TEST_EQ(verifier1.GetComputedSize(), length);
flatbuffers::Verifier verifier2(&test_buff[length], length);
TEST_EQ(VerifyMonsterBuffer(verifier2), true);
TEST_EQ(verifier2.GetComputedSize(), length);
#endif
// clang-format on
TEST_EQ(strcmp(MonsterIdentifier(), "MONS"), 0);
TEST_EQ(MonsterBufferHasIdentifier(flatbuf), true);
TEST_EQ(strcmp(MonsterExtension(), "mon"), 0);
// Access the buffer from the root.
auto monster = GetMonster(flatbuf);
TEST_EQ(monster->hp(), 80);
TEST_EQ(monster->mana(), 150); // default
TEST_EQ_STR(monster->name()->c_str(), "MyMonster");
// Can't access the following field, it is deprecated in the schema,
// which means accessors are not generated:
// monster.friendly()
auto pos = monster->pos();
TEST_NOTNULL(pos);
TEST_EQ(pos->z(), 3);
TEST_EQ(pos->test3().a(), 10);
TEST_EQ(pos->test3().b(), 20);
auto inventory = monster->inventory();
TEST_EQ(VectorLength(inventory), 10UL); // Works even if inventory is null.
TEST_NOTNULL(inventory);
unsigned char inv_data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
  // Check compatibility of iterators with the STL.
std::vector<unsigned char> inv_vec(inventory->begin(), inventory->end());
size_t n = 0;
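  // Forward iterators (begin/end).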
for (auto it = inventory->begin(); it != inventory->end(); ++it, ++n) {
auto indx = it - inventory->begin();
TEST_EQ(*it, inv_vec.at(indx)); // Use bounds-check.
TEST_EQ(*it, inv_data[indx]);
}
TEST_EQ(n, inv_vec.size());
n = 0;
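  // Const iterators (cbegin/cend).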
for (auto it = inventory->cbegin(); it != inventory->cend(); ++it, ++n) {
auto indx = it - inventory->cbegin();
TEST_EQ(*it, inv_vec.at(indx)); // Use bounds-check.
TEST_EQ(*it, inv_data[indx]);
}
TEST_EQ(n, inv_vec.size());
n = 0;
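  // Reverse iterators (rbegin/rend).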
for (auto it = inventory->rbegin(); it != inventory->rend(); ++it, ++n) {
auto indx = inventory->rend() - it - 1;
TEST_EQ(*it, inv_vec.at(indx)); // Use bounds-check.
TEST_EQ(*it, inv_data[indx]);
}
TEST_EQ(n, inv_vec.size());
n = 0;
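  // Const reverse iterators (crbegin/crend).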
for (auto it = inventory->crbegin(); it != inventory->crend(); ++it, ++n) {
auto indx = inventory->crend() - it - 1;
TEST_EQ(*it, inv_vec.at(indx)); // Use bounds-check.
TEST_EQ(*it, inv_data[indx]);
}
TEST_EQ(n, inv_vec.size());
TEST_EQ(monster->color(), Color_Blue);
// Example of accessing a union:
TEST_EQ(monster->test_type(), Any_Monster); // First make sure which it is.
auto monster2 = reinterpret_cast<const Monster *>(monster->test());
TEST_NOTNULL(monster2);
TEST_EQ_STR(monster2->name()->c_str(), "Fred");
// Example of accessing a vector of strings:
auto vecofstrings = monster->testarrayofstring();
TEST_EQ(vecofstrings->size(), 4U);
TEST_EQ_STR(vecofstrings->Get(0)->c_str(), "bob");
TEST_EQ_STR(vecofstrings->Get(1)->c_str(), "fred");
if (pooled) {
// These should have pointer equality because of string pooling.
TEST_EQ(vecofstrings->Get(0)->c_str(), vecofstrings->Get(2)->c_str());
TEST_EQ(vecofstrings->Get(1)->c_str(), vecofstrings->Get(3)->c_str());
}
auto vecofstrings2 = monster->testarrayofstring2();
if (vecofstrings2) {
TEST_EQ(vecofstrings2->size(), 2U);
TEST_EQ_STR(vecofstrings2->Get(0)->c_str(), "jane");
TEST_EQ_STR(vecofstrings2->Get(1)->c_str(), "mary");
}
// Example of accessing a vector of tables:
auto vecoftables = monster->testarrayoftables();
TEST_EQ(vecoftables->size(), 3U);
for (auto it = vecoftables->begin(); it != vecoftables->end(); ++it) {
TEST_EQ(strlen(it->name()->c_str()) >= 4, true);
}
TEST_EQ_STR(vecoftables->Get(0)->name()->c_str(), "Barney");
TEST_EQ(vecoftables->Get(0)->hp(), 1000);
TEST_EQ_STR(vecoftables->Get(1)->name()->c_str(), "Fred");
TEST_EQ_STR(vecoftables->Get(2)->name()->c_str(), "Wilma");
TEST_NOTNULL(vecoftables->LookupByKey("Barney"));
TEST_NOTNULL(vecoftables->LookupByKey("Fred"));
TEST_NOTNULL(vecoftables->LookupByKey("Wilma"));
// Test accessing a vector of sorted structs
auto vecofstructs = monster->testarrayofsortedstruct();
if (vecofstructs) { // not filled in monster_test.bfbs
for (flatbuffers::uoffset_t i = 0; i < vecofstructs->size() - 1; i++) {
auto left = vecofstructs->Get(i);
auto right = vecofstructs->Get(i + 1);
TEST_EQ(true, (left->KeyCompareLessThan(right)));
}
TEST_NOTNULL(vecofstructs->LookupByKey(0)); // test default value
TEST_NOTNULL(vecofstructs->LookupByKey(3));
TEST_EQ(static_cast<const Ability *>(nullptr),
vecofstructs->LookupByKey(5));
}
if (auto vec_of_stat = monster->scalar_key_sorted_tables()) {
auto stat_0 = vec_of_stat->LookupByKey(static_cast<uint16_t>(0u));
TEST_NOTNULL(stat_0);
TEST_NOTNULL(stat_0->id());
TEST_EQ(0, stat_0->count());
TEST_EQ_STR("miss", stat_0->id()->c_str());
}
// Test nested FlatBuffers if available:
auto nested_buffer = monster->testnestedflatbuffer();
if (nested_buffer) {
// nested_buffer is a vector of bytes you can memcpy. However, if you
// actually want to access the nested data, this is a convenient
// accessor that directly gives you the root table:
auto nested_monster = monster->testnestedflatbuffer_nested_root();
TEST_EQ_STR(nested_monster->name()->c_str(), "NestedMonster");
}
// Test flexbuffer if available:
auto flex = monster->flex();
// flex is a vector of bytes you can memcpy etc.
TEST_EQ(flex->size(), 4); // Encoded FlexBuffer bytes.
// However, if you actually want to access the nested data, this is a
// convenient accessor that directly gives you the root value:
TEST_EQ(monster->flex_flexbuffer_root().AsInt16(), 1234);
// Test vector of enums:
auto colors = monster->vector_of_enums();
if (colors) {
TEST_EQ(colors->size(), 2);
TEST_EQ(colors->Get(0), Color_Blue);
TEST_EQ(colors->Get(1), Color_Green);
}
  // Since FlatBuffers uses explicit mechanisms to override the default
  // compiler alignment, double-check that the compiler indeed obeys them.
  // (The Test struct consists of a short and a byte.)
TEST_EQ(flatbuffers::AlignOf<Test>(), 2UL);
TEST_EQ(sizeof(Test), 4UL);
const flatbuffers::Vector<const Test *> *tests_array[] = {
monster->test4(),
monster->test5(),
};
for (size_t i = 0; i < sizeof(tests_array) / sizeof(tests_array[0]); ++i) {
auto tests = tests_array[i];
TEST_NOTNULL(tests);
auto test_0 = tests->Get(0);
auto test_1 = tests->Get(1);
TEST_EQ(test_0->a(), 10);
TEST_EQ(test_0->b(), 20);
TEST_EQ(test_1->a(), 30);
TEST_EQ(test_1->b(), 40);
for (auto it = tests->begin(); it != tests->end(); ++it) {
TEST_EQ(it->a() == 10 || it->a() == 30, true); // Just testing iterators.
}
}
// Checking for presence of fields:
TEST_EQ(flatbuffers::IsFieldPresent(monster, Monster::VT_HP), true);
TEST_EQ(flatbuffers::IsFieldPresent(monster, Monster::VT_MANA), false);
// Obtaining a buffer from a root:
TEST_EQ(GetBufferStartFromRootPointer(monster), flatbuf);
}
// Change a FlatBuffer in-place, after it has been constructed.
void MutateFlatBuffersTest(uint8_t *flatbuf, std::size_t length) {
// Get non-const pointer to root.
auto monster = GetMutableMonster(flatbuf);
  // Each of these tests mutates, then tests, then sets the value back to the
  // original, so we can check that the buffer in the end still passes our
  // original test.
auto hp_ok = monster->mutate_hp(10);
TEST_EQ(hp_ok, true); // Field was present.
TEST_EQ(monster->hp(), 10);
// Mutate to default value
auto hp_ok_default = monster->mutate_hp(100);
TEST_EQ(hp_ok_default, true); // Field was present.
TEST_EQ(monster->hp(), 100);
  // Test that mutating to the default above keeps the field valid for
  // further mutations.
auto hp_ok_2 = monster->mutate_hp(20);
TEST_EQ(hp_ok_2, true);
TEST_EQ(monster->hp(), 20);
monster->mutate_hp(80);
// Monster originally at 150 mana (default value)
  auto mana_default_ok = monster->mutate_mana(150); // Mutate to default value.
  TEST_EQ(mana_default_ok,
          true); // Mutation to the default value should succeed.
TEST_EQ(monster->mana(), 150);
auto mana_ok = monster->mutate_mana(10);
  TEST_EQ(mana_ok, false); // Field was NOT present since it held the default.
TEST_EQ(monster->mana(), 150);
// Mutate structs.
auto pos = monster->mutable_pos();
auto test3 = pos->mutable_test3(); // Struct inside a struct.
test3.mutate_a(50); // Struct fields never fail.
TEST_EQ(test3.a(), 50);
test3.mutate_a(10);
// Mutate vectors.
auto inventory = monster->mutable_inventory();
inventory->Mutate(9, 100);
TEST_EQ(inventory->Get(9), 100);
inventory->Mutate(9, 9);
auto tables = monster->mutable_testarrayoftables();
auto first = tables->GetMutableObject(0);
TEST_EQ(first->hp(), 1000);
first->mutate_hp(0);
TEST_EQ(first->hp(), 0);
first->mutate_hp(1000);
// Run the verifier and the regular test to make sure we didn't trample on
// anything.
AccessFlatBufferTest(flatbuf, length);
}
// Unpack a FlatBuffer into objects.
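// This exercises the object-based API: UnPack the buffer into native C++
// objects, re-serialize them with CreateMonster, and check that a full
// round-trip produces an identical buffer.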
void ObjectFlatBuffersTest(uint8_t *flatbuf) {
// Optional: we can specify resolver and rehasher functions to turn hashed
// strings into object pointers and back, to implement remote references
// and such.
auto resolver = flatbuffers::resolver_function_t(
[](void **pointer_adr, flatbuffers::hash_value_t hash) {
(void)pointer_adr;
(void)hash;
// Don't actually do anything, leave variable null.
});
auto rehasher = flatbuffers::rehasher_function_t(
[](void *pointer) -> flatbuffers::hash_value_t {
(void)pointer;
return 0;
});
// Turn a buffer into C++ objects.
auto monster1 = UnPackMonster(flatbuf, &resolver);
// Re-serialize the data.
flatbuffers::FlatBufferBuilder fbb1;
fbb1.Finish(CreateMonster(fbb1, monster1.get(), &rehasher),
MonsterIdentifier());
// Unpack again, and re-serialize again.
auto monster2 = UnPackMonster(fbb1.GetBufferPointer(), &resolver);
flatbuffers::FlatBufferBuilder fbb2;
fbb2.Finish(CreateMonster(fbb2, monster2.get(), &rehasher),
MonsterIdentifier());
// Now we've gone full round-trip, the two buffers should match.
auto len1 = fbb1.GetSize();
auto len2 = fbb2.GetSize();
TEST_EQ(len1, len2);
TEST_EQ(memcmp(fbb1.GetBufferPointer(), fbb2.GetBufferPointer(), len1), 0);
// Test it with the original buffer test to make sure all data survived.
AccessFlatBufferTest(fbb2.GetBufferPointer(), len2, false);
// Test accessing fields, similar to AccessFlatBufferTest above.
TEST_EQ(monster2->hp, 80);
TEST_EQ(monster2->mana, 150); // default
TEST_EQ_STR(monster2->name.c_str(), "MyMonster");
auto &pos = monster2->pos;
TEST_NOTNULL(pos);
TEST_EQ(pos->z(), 3);
TEST_EQ(pos->test3().a(), 10);
TEST_EQ(pos->test3().b(), 20);
auto &inventory = monster2->inventory;
TEST_EQ(inventory.size(), 10UL);
unsigned char inv_data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
for (auto it = inventory.begin(); it != inventory.end(); ++it)
TEST_EQ(*it, inv_data[it - inventory.begin()]);
TEST_EQ(monster2->color, Color_Blue);
auto monster3 = monster2->test.AsMonster();
TEST_NOTNULL(monster3);
TEST_EQ_STR(monster3->name.c_str(), "Fred");
auto &vecofstrings = monster2->testarrayofstring;
TEST_EQ(vecofstrings.size(), 4U);
TEST_EQ_STR(vecofstrings[0].c_str(), "bob");
TEST_EQ_STR(vecofstrings[1].c_str(), "fred");
auto &vecofstrings2 = monster2->testarrayofstring2;
TEST_EQ(vecofstrings2.size(), 2U);
TEST_EQ_STR(vecofstrings2[0].c_str(), "jane");
TEST_EQ_STR(vecofstrings2[1].c_str(), "mary");
auto &vecoftables = monster2->testarrayoftables;
TEST_EQ(vecoftables.size(), 3U);
TEST_EQ_STR(vecoftables[0]->name.c_str(), "Barney");
TEST_EQ(vecoftables[0]->hp, 1000);
TEST_EQ_STR(vecoftables[1]->name.c_str(), "Fred");
TEST_EQ_STR(vecoftables[2]->name.c_str(), "Wilma");
auto &tests = monster2->test4;
TEST_EQ(tests[0].a(), 10);
TEST_EQ(tests[0].b(), 20);
TEST_EQ(tests[1].a(), 30);
TEST_EQ(tests[1].b(), 40);
}
// Prefix a FlatBuffer with a size field.
void SizePrefixedTest() {
// Create size prefixed buffer.
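  // A size-prefixed buffer stores a 32-bit length field before the root,
  // which is useful when streaming multiple buffers over a connection.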
flatbuffers::FlatBufferBuilder fbb;
FinishSizePrefixedMonsterBuffer(
fbb, CreateMonster(fbb, 0, 200, 300, fbb.CreateString("bob")));
// Verify it.
flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
TEST_EQ(VerifySizePrefixedMonsterBuffer(verifier), true);
// Access it.
auto m = GetSizePrefixedMonster(fbb.GetBufferPointer());
TEST_EQ(m->mana(), 200);
TEST_EQ(m->hp(), 300);
TEST_EQ_STR(m->name()->c_str(), "bob");
}
void TriviallyCopyableTest() {
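  // FlatBuffers structs like Vec3 are accessed in-place, so they must remain
  // trivially copyable (safe to memcpy).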
// clang-format off
#if __GNUG__ && __GNUC__ < 5
TEST_EQ(__has_trivial_copy(Vec3), true);
#else
#if __cplusplus >= 201103L
TEST_EQ(std::is_trivially_copyable<Vec3>::value, true);
#endif
#endif
// clang-format on
}
// Check stringification of a default enum value to JSON.
void JsonDefaultTest() {
// load FlatBuffer schema (.fbs) from disk
std::string schemafile;
TEST_EQ(flatbuffers::LoadFile((test_data_path + "monster_test.fbs").c_str(),
false, &schemafile),
true);
// parse schema first, so we can use it to parse the data after
flatbuffers::Parser parser;
auto include_test_path =
flatbuffers::ConCatPathFileName(test_data_path, "include_test");
const char *include_directories[] = { test_data_path.c_str(),
include_test_path.c_str(), nullptr };
TEST_EQ(parser.Parse(schemafile.c_str(), include_directories), true);
// create incomplete monster and store to json
parser.opts.output_default_scalars_in_json = true;
parser.opts.output_enum_identifiers = true;
flatbuffers::FlatBufferBuilder builder;
auto name = builder.CreateString("default_enum");
MonsterBuilder color_monster(builder);
color_monster.add_name(name);
FinishMonsterBuffer(builder, color_monster.Finish());
std::string jsongen;
auto result = GenerateText(parser, builder.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
// default value of the "color" field is Blue
TEST_EQ(std::string::npos != jsongen.find("color: \"Blue\""), true);
// default value of the "testf" field is 3.14159
TEST_EQ(std::string::npos != jsongen.find("testf: 3.14159"), true);
}
void JsonEnumsTest() {
// load FlatBuffer schema (.fbs) from disk
std::string schemafile;
TEST_EQ(flatbuffers::LoadFile((test_data_path + "monster_test.fbs").c_str(),
false, &schemafile),
true);
// parse schema first, so we can use it to parse the data after
flatbuffers::Parser parser;
auto include_test_path =
flatbuffers::ConCatPathFileName(test_data_path, "include_test");
const char *include_directories[] = { test_data_path.c_str(),
include_test_path.c_str(), nullptr };
parser.opts.output_enum_identifiers = true;
TEST_EQ(parser.Parse(schemafile.c_str(), include_directories), true);
flatbuffers::FlatBufferBuilder builder;
auto name = builder.CreateString("bitflag_enum");
MonsterBuilder color_monster(builder);
color_monster.add_name(name);
color_monster.add_color(Color(Color_Blue | Color_Red));
FinishMonsterBuffer(builder, color_monster.Finish());
std::string jsongen;
auto result = GenerateText(parser, builder.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
TEST_EQ(std::string::npos != jsongen.find("color: \"Red Blue\""), true);
  // Test forward compatibility with 'output_enum_identifiers = true'.
  // The current Color enum doesn't have a '(1u << 2)' value; let's add it.
builder.Clear();
std::string future_json;
auto future_name = builder.CreateString("future bitflag_enum");
MonsterBuilder future_color(builder);
future_color.add_name(future_name);
future_color.add_color(
static_cast<Color>((1u << 2) | Color_Blue | Color_Red));
FinishMonsterBuffer(builder, future_color.Finish());
result = GenerateText(parser, builder.GetBufferPointer(), &future_json);
TEST_EQ(result, true);
TEST_EQ(std::string::npos != future_json.find("color: 13"), true);
}
#if defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
// The IEEE-754 quiet_NaN is not a simple binary constant.
// All binary NaN bit strings have all the bits of the biased exponent field E
// set to 1. A quiet NaN bit string should be encoded with the first bit d[1]
// of the trailing significand field T being 1 (d[0] is the implicit bit).
// It is assumed that the endianness of floating-point matches that of integers.
template<typename T, typename U, U qnan_base> bool is_quiet_nan_impl(T v) {
static_assert(sizeof(T) == sizeof(U), "unexpected");
U b = 0;
std::memcpy(&b, &v, sizeof(T));
return ((b & qnan_base) == qnan_base);
}
# if defined(__mips__) || defined(__hppa__)
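// Some MIPS and PA-RISC FPUs use the inverted quiet/signaling NaN convention,
// so accept both possible quiet-NaN encodings on these targets.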
static bool is_quiet_nan(float v) {
return is_quiet_nan_impl<float, uint32_t, 0x7FC00000u>(v) ||
is_quiet_nan_impl<float, uint32_t, 0x7FBFFFFFu>(v);
}
static bool is_quiet_nan(double v) {
return is_quiet_nan_impl<double, uint64_t, 0x7FF8000000000000ul>(v) ||
is_quiet_nan_impl<double, uint64_t, 0x7FF7FFFFFFFFFFFFu>(v);
}
# else
static bool is_quiet_nan(float v) {
return is_quiet_nan_impl<float, uint32_t, 0x7FC00000u>(v);
}
static bool is_quiet_nan(double v) {
return is_quiet_nan_impl<double, uint64_t, 0x7FF8000000000000ul>(v);
}
# endif
void TestMonsterExtraFloats() {
TEST_EQ(is_quiet_nan(1.0), false);
TEST_EQ(is_quiet_nan(infinity_d), false);
TEST_EQ(is_quiet_nan(-infinity_f), false);
TEST_EQ(is_quiet_nan(std::numeric_limits<float>::quiet_NaN()), true);
TEST_EQ(is_quiet_nan(std::numeric_limits<double>::quiet_NaN()), true);
using namespace flatbuffers;
using namespace MyGame;
// Load FlatBuffer schema (.fbs) from disk.
std::string schemafile;
TEST_EQ(LoadFile((test_data_path + "monster_extra.fbs").c_str(), false,
&schemafile),
true);
// Parse schema first, so we can use it to parse the data after.
Parser parser;
auto include_test_path = ConCatPathFileName(test_data_path, "include_test");
const char *include_directories[] = { test_data_path.c_str(),
include_test_path.c_str(), nullptr };
TEST_EQ(parser.Parse(schemafile.c_str(), include_directories), true);
// Create empty extra and store to json.
parser.opts.output_default_scalars_in_json = true;
parser.opts.output_enum_identifiers = true;
FlatBufferBuilder builder;
const auto def_root = MonsterExtraBuilder(builder).Finish();
FinishMonsterExtraBuffer(builder, def_root);
const auto def_obj = builder.GetBufferPointer();
const auto def_extra = GetMonsterExtra(def_obj);
TEST_NOTNULL(def_extra);
TEST_EQ(is_quiet_nan(def_extra->f0()), true);
TEST_EQ(is_quiet_nan(def_extra->f1()), true);
TEST_EQ(def_extra->f2(), +infinity_f);
TEST_EQ(def_extra->f3(), -infinity_f);
TEST_EQ(is_quiet_nan(def_extra->d0()), true);
TEST_EQ(is_quiet_nan(def_extra->d1()), true);
TEST_EQ(def_extra->d2(), +infinity_d);
TEST_EQ(def_extra->d3(), -infinity_d);
std::string jsongen;
auto result = GenerateText(parser, def_obj, &jsongen);
TEST_EQ(result, true);
// Check expected default values.
TEST_EQ(std::string::npos != jsongen.find("f0: nan"), true);
TEST_EQ(std::string::npos != jsongen.find("f1: nan"), true);
TEST_EQ(std::string::npos != jsongen.find("f2: inf"), true);
TEST_EQ(std::string::npos != jsongen.find("f3: -inf"), true);
TEST_EQ(std::string::npos != jsongen.find("d0: nan"), true);
TEST_EQ(std::string::npos != jsongen.find("d1: nan"), true);
TEST_EQ(std::string::npos != jsongen.find("d2: inf"), true);
TEST_EQ(std::string::npos != jsongen.find("d3: -inf"), true);
  // Parse 'monsterdata_extra.json'.
const auto extra_base = test_data_path + "monsterdata_extra";
jsongen = "";
TEST_EQ(LoadFile((extra_base + ".json").c_str(), false, &jsongen), true);
TEST_EQ(parser.Parse(jsongen.c_str()), true);
const auto test_file = parser.builder_.GetBufferPointer();
const auto test_size = parser.builder_.GetSize();
Verifier verifier(test_file, test_size);
TEST_ASSERT(VerifyMonsterExtraBuffer(verifier));
const auto extra = GetMonsterExtra(test_file);
TEST_NOTNULL(extra);
TEST_EQ(is_quiet_nan(extra->f0()), true);
TEST_EQ(is_quiet_nan(extra->f1()), true);
TEST_EQ(extra->f2(), +infinity_f);
TEST_EQ(extra->f3(), -infinity_f);
TEST_EQ(is_quiet_nan(extra->d0()), true);
TEST_EQ(extra->d1(), +infinity_d);
TEST_EQ(extra->d2(), -infinity_d);
TEST_EQ(is_quiet_nan(extra->d3()), true);
TEST_NOTNULL(extra->fvec());
TEST_EQ(extra->fvec()->size(), 4);
TEST_EQ(extra->fvec()->Get(0), 1.0f);
TEST_EQ(extra->fvec()->Get(1), -infinity_f);
TEST_EQ(extra->fvec()->Get(2), +infinity_f);
TEST_EQ(is_quiet_nan(extra->fvec()->Get(3)), true);
TEST_NOTNULL(extra->dvec());
TEST_EQ(extra->dvec()->size(), 4);
TEST_EQ(extra->dvec()->Get(0), 2.0);
TEST_EQ(extra->dvec()->Get(1), +infinity_d);
TEST_EQ(extra->dvec()->Get(2), -infinity_d);
TEST_EQ(is_quiet_nan(extra->dvec()->Get(3)), true);
}
#else
void TestMonsterExtraFloats() {}
#endif
// example of parsing text straight into a buffer, and generating
// text back from it:
void ParseAndGenerateTextTest(bool binary) {
// load FlatBuffer schema (.fbs) and JSON from disk
std::string schemafile;
std::string jsonfile;
TEST_EQ(flatbuffers::LoadFile(
(test_data_path + "monster_test." + (binary ? "bfbs" : "fbs"))
.c_str(),
binary, &schemafile),
true);
TEST_EQ(flatbuffers::LoadFile(
(test_data_path + "monsterdata_test.golden").c_str(), false,
&jsonfile),
true);
auto include_test_path =
flatbuffers::ConCatPathFileName(test_data_path, "include_test");
const char *include_directories[] = { test_data_path.c_str(),
include_test_path.c_str(), nullptr };
// parse schema first, so we can use it to parse the data after
flatbuffers::Parser parser;
if (binary) {
flatbuffers::Verifier verifier(
reinterpret_cast<const uint8_t *>(schemafile.c_str()),
schemafile.size());
TEST_EQ(reflection::VerifySchemaBuffer(verifier), true);
// auto schema = reflection::GetSchema(schemafile.c_str());
TEST_EQ(parser.Deserialize((const uint8_t *)schemafile.c_str(),
schemafile.size()),
true);
} else {
TEST_EQ(parser.Parse(schemafile.c_str(), include_directories), true);
}
TEST_EQ(parser.ParseJson(jsonfile.c_str()), true);
// here, parser.builder_ contains a binary buffer that is the parsed data.
// First, verify it, just in case:
flatbuffers::Verifier verifier(parser.builder_.GetBufferPointer(),
parser.builder_.GetSize());
TEST_EQ(VerifyMonsterBuffer(verifier), true);
AccessFlatBufferTest(parser.builder_.GetBufferPointer(),
parser.builder_.GetSize(), false);
// to ensure it is correct, we now generate text back from the binary,
// and compare the two:
std::string jsongen;
auto result =
GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
TEST_EQ_STR(jsongen.c_str(), jsonfile.c_str());
// We can also do the above using the convenient Registry that knows about
// a set of file_identifiers mapped to schemas.
flatbuffers::Registry registry;
// Make sure schemas can find their includes.
registry.AddIncludeDirectory(test_data_path.c_str());
registry.AddIncludeDirectory(include_test_path.c_str());
// Call this with many schemas if possible.
registry.Register(MonsterIdentifier(),
(test_data_path + "monster_test.fbs").c_str());
  // Now that this is set up, we can parse by just specifying the identifier;
  // the correct schema will be loaded on the fly:
auto buf = registry.TextToFlatBuffer(jsonfile.c_str(), MonsterIdentifier());
// If this fails, check registry.lasterror_.
TEST_NOTNULL(buf.data());
// Test the buffer, to be sure:
AccessFlatBufferTest(buf.data(), buf.size(), false);
  // We can use the registry to turn this back into text; in this case it
  // will get the file_identifier from the binary:
std::string text;
auto ok = registry.FlatBufferToText(buf.data(), buf.size(), &text);
// If this fails, check registry.lasterror_.
TEST_EQ(ok, true);
TEST_EQ_STR(text.c_str(), jsonfile.c_str());
// Generate text for UTF-8 strings without escapes.
std::string jsonfile_utf8;
TEST_EQ(flatbuffers::LoadFile((test_data_path + "unicode_test.json").c_str(),
false, &jsonfile_utf8),
true);
TEST_EQ(parser.Parse(jsonfile_utf8.c_str(), include_directories), true);
// To ensure it is correct, generate utf-8 text back from the binary.
std::string jsongen_utf8;
// request natural printing for utf-8 strings
parser.opts.natural_utf8 = true;
parser.opts.strict_json = true;
TEST_EQ(
GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen_utf8),
true);
TEST_EQ_STR(jsongen_utf8.c_str(), jsonfile_utf8.c_str());
}
void ReflectionTest(uint8_t *flatbuf, size_t length) {
// Load a binary schema.
std::string bfbsfile;
TEST_EQ(flatbuffers::LoadFile((test_data_path + "monster_test.bfbs").c_str(),
true, &bfbsfile),
true);
// Verify it, just in case:
flatbuffers::Verifier verifier(
reinterpret_cast<const uint8_t *>(bfbsfile.c_str()), bfbsfile.length());
TEST_EQ(reflection::VerifySchemaBuffer(verifier), true);
// Make sure the schema is what we expect it to be.
auto &schema = *reflection::GetSchema(bfbsfile.c_str());
auto root_table = schema.root_table();
TEST_EQ_STR(root_table->name()->c_str(), "MyGame.Example.Monster");
auto fields = root_table->fields();
auto hp_field_ptr = fields->LookupByKey("hp");
TEST_NOTNULL(hp_field_ptr);
auto &hp_field = *hp_field_ptr;
TEST_EQ_STR(hp_field.name()->c_str(), "hp");
TEST_EQ(hp_field.id(), 2);
TEST_EQ(hp_field.type()->base_type(), reflection::Short);
auto friendly_field_ptr = fields->LookupByKey("friendly");
TEST_NOTNULL(friendly_field_ptr);
TEST_NOTNULL(friendly_field_ptr->attributes());
TEST_NOTNULL(friendly_field_ptr->attributes()->LookupByKey("priority"));
// Make sure the table index is what we expect it to be.
auto pos_field_ptr = fields->LookupByKey("pos");
TEST_NOTNULL(pos_field_ptr);
TEST_EQ(pos_field_ptr->type()->base_type(), reflection::Obj);
auto pos_table_ptr = schema.objects()->Get(pos_field_ptr->type()->index());
TEST_NOTNULL(pos_table_ptr);
TEST_EQ_STR(pos_table_ptr->name()->c_str(), "MyGame.Example.Vec3");
  // Test nullability of fields: hp is a scalar with a regular (non-null)
  // default => not optional, pos is a struct => optional, and name is a
  // required string => not optional.
TEST_EQ(hp_field.optional(), false);
TEST_EQ(pos_field_ptr->optional(), true);
TEST_EQ(fields->LookupByKey("name")->optional(), false);
// Now use it to dynamically access a buffer.
auto &root = *flatbuffers::GetAnyRoot(flatbuf);
// Verify the buffer first using reflection based verification
TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(), flatbuf, length),
true);
auto hp = flatbuffers::GetFieldI<uint16_t>(root, hp_field);
TEST_EQ(hp, 80);
// Rather than needing to know the type, we can also get the value of
// any field as an int64_t/double/string, regardless of what it actually is.
auto hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field);
TEST_EQ(hp_int64, 80);
auto hp_double = flatbuffers::GetAnyFieldF(root, hp_field);
TEST_EQ(hp_double, 80.0);
auto hp_string = flatbuffers::GetAnyFieldS(root, hp_field, &schema);
TEST_EQ_STR(hp_string.c_str(), "80");
// Get struct field through reflection
auto pos_struct = flatbuffers::GetFieldStruct(root, *pos_field_ptr);
TEST_NOTNULL(pos_struct);
TEST_EQ(flatbuffers::GetAnyFieldF(*pos_struct,
*pos_table_ptr->fields()->LookupByKey("z")),
3.0f);
auto test3_field = pos_table_ptr->fields()->LookupByKey("test3");
auto test3_struct = flatbuffers::GetFieldStruct(*pos_struct, *test3_field);
TEST_NOTNULL(test3_struct);
auto test3_object = schema.objects()->Get(test3_field->type()->index());
TEST_EQ(flatbuffers::GetAnyFieldF(*test3_struct,
*test3_object->fields()->LookupByKey("a")),
10);
// We can also modify it.
flatbuffers::SetField<uint16_t>(&root, hp_field, 200);
hp = flatbuffers::GetFieldI<uint16_t>(root, hp_field);
TEST_EQ(hp, 200);
// We can also set fields generically:
flatbuffers::SetAnyFieldI(&root, hp_field, 300);
hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field);
TEST_EQ(hp_int64, 300);
flatbuffers::SetAnyFieldF(&root, hp_field, 300.5);
hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field);
TEST_EQ(hp_int64, 300);
flatbuffers::SetAnyFieldS(&root, hp_field, "300");
hp_int64 = flatbuffers::GetAnyFieldI(root, hp_field);
TEST_EQ(hp_int64, 300);
// Test buffer is valid after the modifications
TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(), flatbuf, length),
true);
// Reset it, for further tests.
flatbuffers::SetField<uint16_t>(&root, hp_field, 80);
// More advanced functionality: changing the size of items in-line!
// First we put the FlatBuffer inside an std::vector.
std::vector<uint8_t> resizingbuf(flatbuf, flatbuf + length);
// Find the field we want to modify.
auto &name_field = *fields->LookupByKey("name");
// Get the root.
  // This time we wrap the result from GetAnyRoot in a smart pointer that
// will keep rroot valid as resizingbuf resizes.
auto rroot = flatbuffers::piv(
flatbuffers::GetAnyRoot(flatbuffers::vector_data(resizingbuf)),
resizingbuf);
SetString(schema, "totally new string", GetFieldS(**rroot, name_field),
&resizingbuf);
// Here resizingbuf has changed, but rroot is still valid.
TEST_EQ_STR(GetFieldS(**rroot, name_field)->c_str(), "totally new string");
  // Now let's extend a vector by 100 elements (10 -> 110).
auto &inventory_field = *fields->LookupByKey("inventory");
auto rinventory = flatbuffers::piv(
flatbuffers::GetFieldV<uint8_t>(**rroot, inventory_field), resizingbuf);
flatbuffers::ResizeVector<uint8_t>(schema, 110, 50, *rinventory,
&resizingbuf);
// rinventory still valid, so lets read from it.
TEST_EQ(rinventory->Get(10), 50);
// For reflection uses not covered already, there is a more powerful way:
// we can simply generate whatever object we want to add/modify in a
// FlatBuffer of its own, then add that to an existing FlatBuffer:
// As an example, let's add a string to an array of strings.
// First, find our field:
auto &testarrayofstring_field = *fields->LookupByKey("testarrayofstring");
// Find the vector value:
auto rtestarrayofstring = flatbuffers::piv(
flatbuffers::GetFieldV<flatbuffers::Offset<flatbuffers::String>>(
**rroot, testarrayofstring_field),
resizingbuf);
// It's a vector of 2 strings, to which we add one more, initialized to
// offset 0.
flatbuffers::ResizeVector<flatbuffers::Offset<flatbuffers::String>>(
schema, 3, 0, *rtestarrayofstring, &resizingbuf);
  // Here we just create a buffer that contains a single string, but this
// could also be any complex set of tables and other values.
flatbuffers::FlatBufferBuilder stringfbb;
stringfbb.Finish(stringfbb.CreateString("hank"));
// Add the contents of it to our existing FlatBuffer.
// We do this last, so the pointer doesn't get invalidated (since it is
// at the end of the buffer):
auto string_ptr = flatbuffers::AddFlatBuffer(
resizingbuf, stringfbb.GetBufferPointer(), stringfbb.GetSize());
// Finally, set the new value in the vector.
rtestarrayofstring->MutateOffset(2, string_ptr);
TEST_EQ_STR(rtestarrayofstring->Get(0)->c_str(), "bob");
TEST_EQ_STR(rtestarrayofstring->Get(2)->c_str(), "hank");
// Test integrity of all resize operations above.
flatbuffers::Verifier resize_verifier(
reinterpret_cast<const uint8_t *>(flatbuffers::vector_data(resizingbuf)),
resizingbuf.size());
TEST_EQ(VerifyMonsterBuffer(resize_verifier), true);
// Test buffer is valid using reflection as well
TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(),
flatbuffers::vector_data(resizingbuf),
resizingbuf.size()),
true);
// As an additional test, also set it on the name field.
// Note: unlike the name change above, this just overwrites the offset,
// rather than changing the string in-place.
SetFieldT(*rroot, name_field, string_ptr);
TEST_EQ_STR(GetFieldS(**rroot, name_field)->c_str(), "hank");
  // Using reflection, rather than mutating binary FlatBuffers, we can also
  // copy tables and other things out of other FlatBuffers into a
  // FlatBufferBuilder, either in part or in whole.
flatbuffers::FlatBufferBuilder fbb;
auto root_offset = flatbuffers::CopyTable(
fbb, schema, *root_table, *flatbuffers::GetAnyRoot(flatbuf), true);
fbb.Finish(root_offset, MonsterIdentifier());
// Test that it was copied correctly:
AccessFlatBufferTest(fbb.GetBufferPointer(), fbb.GetSize());
// Test buffer is valid using reflection as well
TEST_EQ(flatbuffers::Verify(schema, *schema.root_table(),
fbb.GetBufferPointer(), fbb.GetSize()),
true);
}
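// Mini-reflection: stringify a buffer using the compile-time generated type
// tables (MiniReflectTypeTable), without needing the full .bfbs schema.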
void MiniReflectFlatBuffersTest(uint8_t *flatbuf) {
auto s =
flatbuffers::FlatBufferToString(flatbuf, Monster::MiniReflectTypeTable());
TEST_EQ_STR(
s.c_str(),
"{ "
"pos: { x: 1.0, y: 2.0, z: 3.0, test1: 0.0, test2: Red, test3: "
"{ a: 10, b: 20 } }, "
"hp: 80, "
"name: \"MyMonster\", "
"inventory: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], "
"test_type: Monster, "
"test: { name: \"Fred\" }, "
"test4: [ { a: 10, b: 20 }, { a: 30, b: 40 } ], "
"testarrayofstring: [ \"bob\", \"fred\", \"bob\", \"fred\" ], "
"testarrayoftables: [ { hp: 1000, name: \"Barney\" }, { name: \"Fred\" "
"}, "
"{ name: \"Wilma\" } ], "
// TODO(wvo): should really print this nested buffer correctly.
"testnestedflatbuffer: [ 20, 0, 0, 0, 77, 79, 78, 83, 12, 0, 12, 0, 0, "
"0, "
"4, 0, 6, 0, 8, 0, 12, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 13, 0, 0, 0, 78, "
"101, 115, 116, 101, 100, 77, 111, 110, 115, 116, 101, 114, 0, 0, 0 ], "
"testarrayofstring2: [ \"jane\", \"mary\" ], "
"testarrayofsortedstruct: [ { id: 0, distance: 0 }, "
"{ id: 2, distance: 20 }, { id: 3, distance: 30 }, "
"{ id: 4, distance: 40 } ], "
"flex: [ 210, 4, 5, 2 ], "
"test5: [ { a: 10, b: 20 }, { a: 30, b: 40 } ], "
"vector_of_enums: [ Blue, Green ], "
"scalar_key_sorted_tables: [ { id: \"miss\" } ] "
"}");
Test test(16, 32);
Vec3 vec(1, 2, 3, 1.5, Color_Red, test);
flatbuffers::FlatBufferBuilder vec_builder;
vec_builder.Finish(vec_builder.CreateStruct(vec));
auto vec_buffer = vec_builder.Release();
auto vec_str = flatbuffers::FlatBufferToString(vec_buffer.data(),
Vec3::MiniReflectTypeTable());
TEST_EQ_STR(vec_str.c_str(),
"{ x: 1.0, y: 2.0, z: 3.0, test1: 1.5, test2: Red, test3: { a: "
"16, b: 32 } }");
}
void MiniReflectFixedLengthArrayTest() {
  // VS10 does not support typed enums; exclude from tests.
#if !defined(_MSC_VER) || _MSC_VER >= 1700
flatbuffers::FlatBufferBuilder fbb;
MyGame::Example::ArrayStruct aStruct(2, 12, 1);
auto aTable = MyGame::Example::CreateArrayTable(fbb, &aStruct);
fbb.Finish(aTable);
auto flatbuf = fbb.Release();
auto s = flatbuffers::FlatBufferToString(
flatbuf.data(), MyGame::Example::ArrayTableTypeTable());
TEST_EQ_STR(
"{ "
"a: { a: 2.0, "
"b: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], "
"c: 12, "
"d: [ { a: [ 0, 0 ], b: A, c: [ A, A ], d: [ 0, 0 ] }, "
"{ a: [ 0, 0 ], b: A, c: [ A, A ], d: [ 0, 0 ] } ], "
"e: 1, f: [ 0, 0 ] } "
"}",
s.c_str());
#endif
}
// Parse a .proto schema, output as .fbs
void ParseProtoTest() {
// load the .proto and the golden file from disk
std::string protofile;
std::string goldenfile;
std::string goldenunionfile;
TEST_EQ(
flatbuffers::LoadFile((test_data_path + "prototest/test.proto").c_str(),
false, &protofile),
true);
TEST_EQ(
flatbuffers::LoadFile((test_data_path + "prototest/test.golden").c_str(),
false, &goldenfile),
true);
TEST_EQ(flatbuffers::LoadFile(
(test_data_path + "prototest/test_union.golden").c_str(), false,
&goldenunionfile),
true);
flatbuffers::IDLOptions opts;
opts.include_dependence_headers = false;
opts.proto_mode = true;
// Parse proto.
flatbuffers::Parser parser(opts);
auto protopath = test_data_path + "prototest/";
const char *include_directories[] = { protopath.c_str(), nullptr };
TEST_EQ(parser.Parse(protofile.c_str(), include_directories), true);
// Generate fbs.
auto fbs = flatbuffers::GenerateFBS(parser, "test");
// Ensure generated file is parsable.
flatbuffers::Parser parser2;
TEST_EQ(parser2.Parse(fbs.c_str(), nullptr), true);
TEST_EQ_STR(fbs.c_str(), goldenfile.c_str());
// Parse proto with --oneof-union option.
opts.proto_oneof_union = true;
flatbuffers::Parser parser3(opts);
TEST_EQ(parser3.Parse(protofile.c_str(), include_directories), true);
// Generate fbs.
auto fbs_union = flatbuffers::GenerateFBS(parser3, "test");
// Ensure generated file is parsable.
flatbuffers::Parser parser4;
TEST_EQ(parser4.Parse(fbs_union.c_str(), nullptr), true);
TEST_EQ_STR(fbs_union.c_str(), goldenunionfile.c_str());
}
// Parse a .proto schema, output as .fbs
void ParseProtoTestWithSuffix() {
// load the .proto and the golden file from disk
std::string protofile;
std::string goldenfile;
std::string goldenunionfile;
TEST_EQ(
flatbuffers::LoadFile((test_data_path + "prototest/test.proto").c_str(),
false, &protofile),
true);
TEST_EQ(flatbuffers::LoadFile(
(test_data_path + "prototest/test_suffix.golden").c_str(), false,
&goldenfile),
true);
TEST_EQ(flatbuffers::LoadFile(
(test_data_path + "prototest/test_union_suffix.golden").c_str(),
false, &goldenunionfile),
true);
flatbuffers::IDLOptions opts;
opts.include_dependence_headers = false;
opts.proto_mode = true;
opts.proto_namespace_suffix = "test_namespace_suffix";
// Parse proto.
flatbuffers::Parser parser(opts);
auto protopath = test_data_path + "prototest/";
const char *include_directories[] = { protopath.c_str(), nullptr };
TEST_EQ(parser.Parse(protofile.c_str(), include_directories), true);
// Generate fbs.
auto fbs = flatbuffers::GenerateFBS(parser, "test");
// Ensure generated file is parsable.
flatbuffers::Parser parser2;
TEST_EQ(parser2.Parse(fbs.c_str(), nullptr), true);
TEST_EQ_STR(fbs.c_str(), goldenfile.c_str());
// Parse proto with --oneof-union option.
opts.proto_oneof_union = true;
flatbuffers::Parser parser3(opts);
TEST_EQ(parser3.Parse(protofile.c_str(), include_directories), true);
// Generate fbs.
auto fbs_union = flatbuffers::GenerateFBS(parser3, "test");
// Ensure generated file is parsable.
flatbuffers::Parser parser4;
TEST_EQ(parser4.Parse(fbs_union.c_str(), nullptr), true);
TEST_EQ_STR(fbs_union.c_str(), goldenunionfile.c_str());
}
// Parse a .proto schema, output as .fbs
void ParseProtoTestWithIncludes() {
// load the .proto and the golden file from disk
std::string protofile;
std::string goldenfile;
std::string goldenunionfile;
std::string importprotofile;
TEST_EQ(
flatbuffers::LoadFile((test_data_path + "prototest/test.proto").c_str(),
false, &protofile),
true);
TEST_EQ(flatbuffers::LoadFile(
(test_data_path + "prototest/imported.proto").c_str(), false,
&importprotofile),
true);
TEST_EQ(flatbuffers::LoadFile(
(test_data_path + "prototest/test_include.golden").c_str(), false,
&goldenfile),
true);
TEST_EQ(flatbuffers::LoadFile(
(test_data_path + "prototest/test_union_include.golden").c_str(),
false, &goldenunionfile),
true);
flatbuffers::IDLOptions opts;
opts.include_dependence_headers = true;
opts.proto_mode = true;
// Parse proto.
flatbuffers::Parser parser(opts);
auto protopath = test_data_path + "prototest/";
const char *include_directories[] = { protopath.c_str(), nullptr };
TEST_EQ(parser.Parse(protofile.c_str(), include_directories), true);
// Generate fbs.
auto fbs = flatbuffers::GenerateFBS(parser, "test");
// Generate fbs from import.proto
flatbuffers::Parser import_parser(opts);
TEST_EQ(import_parser.Parse(importprotofile.c_str(), include_directories),
true);
auto import_fbs = flatbuffers::GenerateFBS(import_parser, "test");
// Ensure generated file is parsable.
flatbuffers::Parser parser2;
TEST_EQ(
parser2.Parse(import_fbs.c_str(), include_directories, "imported.fbs"),
true);
TEST_EQ(parser2.Parse(fbs.c_str(), nullptr), true);
TEST_EQ_STR(fbs.c_str(), goldenfile.c_str());
// Parse proto with --oneof-union option.
opts.proto_oneof_union = true;
flatbuffers::Parser parser3(opts);
TEST_EQ(parser3.Parse(protofile.c_str(), include_directories), true);
// Generate fbs.
auto fbs_union = flatbuffers::GenerateFBS(parser3, "test");
// Ensure generated file is parsable.
flatbuffers::Parser parser4;
TEST_EQ(parser4.Parse(import_fbs.c_str(), nullptr, "imported.fbs"), true);
TEST_EQ(parser4.Parse(fbs_union.c_str(), nullptr), true);
TEST_EQ_STR(fbs_union.c_str(), goldenunionfile.c_str());
}
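// Helper for FuzzTest1: read back a field of type T at the given vtable
// offset and compare it against the expected value.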
template<typename T>
void CompareTableFieldValue(flatbuffers::Table *table,
flatbuffers::voffset_t voffset, T val) {
T read = table->GetField(voffset, static_cast<T>(0));
TEST_EQ(read, val);
}
// Low-level stress/fuzz test: serialize/deserialize a variety of
// different kinds of data in different combinations.
void FuzzTest1() {
  // Values we're testing against: chosen to ensure no bits get chopped
  // off anywhere, and also to be different from each other.
const uint8_t bool_val = true;
const int8_t char_val = -127; // 0x81
const uint8_t uchar_val = 0xFF;
const int16_t short_val = -32222; // 0x8222;
const uint16_t ushort_val = 0xFEEE;
const int32_t int_val = 0x83333333;
const uint32_t uint_val = 0xFDDDDDDD;
const int64_t long_val = 0x8444444444444444LL;
const uint64_t ulong_val = 0xFCCCCCCCCCCCCCCCULL;
const float float_val = 3.14159f;
const double double_val = 3.14159265359;
const int test_values_max = 11;
const flatbuffers::voffset_t fields_per_object = 4;
const int num_fuzz_objects = 10000; // The higher, the more thorough :)
flatbuffers::FlatBufferBuilder builder;
lcg_reset(); // Keep it deterministic.
flatbuffers::uoffset_t objects[num_fuzz_objects];
// Generate num_fuzz_objects random objects each consisting of
// fields_per_object fields, each of a random type.
for (int i = 0; i < num_fuzz_objects; i++) {
auto start = builder.StartTable();
for (flatbuffers::voffset_t f = 0; f < fields_per_object; f++) {
int choice = lcg_rand() % test_values_max;
auto off = flatbuffers::FieldIndexToOffset(f);
switch (choice) {
case 0: builder.AddElement<uint8_t>(off, bool_val, 0); break;
case 1: builder.AddElement<int8_t>(off, char_val, 0); break;
case 2: builder.AddElement<uint8_t>(off, uchar_val, 0); break;
case 3: builder.AddElement<int16_t>(off, short_val, 0); break;
case 4: builder.AddElement<uint16_t>(off, ushort_val, 0); break;
case 5: builder.AddElement<int32_t>(off, int_val, 0); break;
case 6: builder.AddElement<uint32_t>(off, uint_val, 0); break;
case 7: builder.AddElement<int64_t>(off, long_val, 0); break;
case 8: builder.AddElement<uint64_t>(off, ulong_val, 0); break;
case 9: builder.AddElement<float>(off, float_val, 0); break;
case 10: builder.AddElement<double>(off, double_val, 0); break;
}
}
objects[i] = builder.EndTable(start);
}
builder.PreAlign<flatbuffers::largest_scalar_t>(0); // Align whole buffer.
lcg_reset(); // Reset.
uint8_t *eob = builder.GetCurrentBufferPointer() + builder.GetSize();
// Test that all objects we generated are readable and return the
// expected values. We generate random objects in the same order
// so this is deterministic.
for (int i = 0; i < num_fuzz_objects; i++) {
auto table = reinterpret_cast<flatbuffers::Table *>(eob - objects[i]);
for (flatbuffers::voffset_t f = 0; f < fields_per_object; f++) {
int choice = lcg_rand() % test_values_max;
flatbuffers::voffset_t off = flatbuffers::FieldIndexToOffset(f);
switch (choice) {
case 0: CompareTableFieldValue(table, off, bool_val); break;
case 1: CompareTableFieldValue(table, off, char_val); break;
case 2: CompareTableFieldValue(table, off, uchar_val); break;
case 3: CompareTableFieldValue(table, off, short_val); break;
case 4: CompareTableFieldValue(table, off, ushort_val); break;
case 5: CompareTableFieldValue(table, off, int_val); break;
case 6: CompareTableFieldValue(table, off, uint_val); break;
case 7: CompareTableFieldValue(table, off, long_val); break;
case 8: CompareTableFieldValue(table, off, ulong_val); break;
case 9: CompareTableFieldValue(table, off, float_val); break;
case 10: CompareTableFieldValue(table, off, double_val); break;
}
}
}
}
// High level stress/fuzz test: generate a big schema and
// matching json data in random combinations, then parse both,
// generate json back from the binary, and compare with the original.
void FuzzTest2() {
lcg_reset(); // Keep it deterministic.
const int num_definitions = 30;
const int num_struct_definitions = 5; // Subset of num_definitions.
const int fields_per_definition = 15;
const int instances_per_definition = 5;
const int deprecation_rate = 10; // 1 in deprecation_rate fields will
// be deprecated.
std::string schema = "namespace test;\n\n";
struct RndDef {
std::string instances[instances_per_definition];
// Since we're generating schema and corresponding data in tandem,
// this convenience function adds strings to both at once.
static void Add(RndDef (&definitions_l)[num_definitions],
std::string &schema_l, const int instances_per_definition_l,
const char *schema_add, const char *instance_add,
int definition) {
schema_l += schema_add;
for (int i = 0; i < instances_per_definition_l; i++)
definitions_l[definition].instances[i] += instance_add;
}
};
// clang-format off
#define AddToSchemaAndInstances(schema_add, instance_add) \
RndDef::Add(definitions, schema, instances_per_definition, \
schema_add, instance_add, definition)
#define Dummy() \
RndDef::Add(definitions, schema, instances_per_definition, \
"byte", "1", definition)
// clang-format on
RndDef definitions[num_definitions];
  // We are going to generate num_definitions definitions; the first
  // num_struct_definitions will be structs, the rest tables. For each we
  // generate random fields, some of which may be struct/table types
  // referring to previously generated structs/tables.
  // Simultaneously, we generate instances_per_definition JSON data
  // definitions, which will have a structure identical to the schema
  // being generated. We generate multiple instances so that, when creating a
  // hierarchy, we get some variety by picking one randomly.
for (int definition = 0; definition < num_definitions; definition++) {
std::string definition_name = "D" + flatbuffers::NumToString(definition);
bool is_struct = definition < num_struct_definitions;
AddToSchemaAndInstances(
((is_struct ? "struct " : "table ") + definition_name + " {\n").c_str(),
"{\n");
for (int field = 0; field < fields_per_definition; field++) {
const bool is_last_field = field == fields_per_definition - 1;
// Deprecate 1 in deprecation_rate fields. Only table fields can be
// deprecated.
// Don't deprecate the last field to avoid dangling commas in JSON.
const bool deprecated =
!is_struct && !is_last_field && (lcg_rand() % deprecation_rate == 0);
std::string field_name = "f" + flatbuffers::NumToString(field);
AddToSchemaAndInstances((" " + field_name + ":").c_str(),
deprecated ? "" : (field_name + ": ").c_str());
// Pick random type:
auto base_type = static_cast<flatbuffers::BaseType>(
lcg_rand() % (flatbuffers::BASE_TYPE_UNION + 1));
switch (base_type) {
case flatbuffers::BASE_TYPE_STRING:
if (is_struct) {
Dummy(); // No strings in structs.
} else {
AddToSchemaAndInstances("string", deprecated ? "" : "\"hi\"");
}
break;
case flatbuffers::BASE_TYPE_VECTOR:
if (is_struct) {
Dummy(); // No vectors in structs.
} else {
AddToSchemaAndInstances("[ubyte]",
deprecated ? "" : "[\n0,\n1,\n255\n]");
}
break;
case flatbuffers::BASE_TYPE_NONE:
case flatbuffers::BASE_TYPE_UTYPE:
case flatbuffers::BASE_TYPE_STRUCT:
case flatbuffers::BASE_TYPE_UNION:
if (definition) {
// Pick a random previous definition and random data instance of
// that definition.
int defref = lcg_rand() % definition;
int instance = lcg_rand() % instances_per_definition;
AddToSchemaAndInstances(
("D" + flatbuffers::NumToString(defref)).c_str(),
deprecated ? ""
: definitions[defref].instances[instance].c_str());
} else {
// If this is the first definition, we have no definition we can
// refer to.
Dummy();
}
break;
case flatbuffers::BASE_TYPE_BOOL:
AddToSchemaAndInstances(
"bool", deprecated ? "" : (lcg_rand() % 2 ? "true" : "false"));
break;
case flatbuffers::BASE_TYPE_ARRAY:
if (!is_struct) {
AddToSchemaAndInstances(
"ubyte",
deprecated ? "" : "255"); // No fixed-length arrays in tables.
} else {
AddToSchemaAndInstances("[int:3]", deprecated ? "" : "[\n,\n,\n]");
}
break;
default:
// All the scalar types.
schema += flatbuffers::kTypeNames[base_type];
if (!deprecated) {
// We want each instance to use its own random value.
for (int inst = 0; inst < instances_per_definition; inst++)
definitions[definition].instances[inst] +=
flatbuffers::IsFloat(base_type)
? flatbuffers::NumToString<double>(lcg_rand() % 128)
.c_str()
: flatbuffers::NumToString<int>(lcg_rand() % 128).c_str();
}
}
AddToSchemaAndInstances(deprecated ? "(deprecated);\n" : ";\n",
deprecated ? "" : is_last_field ? "\n" : ",\n");
}
AddToSchemaAndInstances("}\n\n", "}");
}
schema += "root_type D" + flatbuffers::NumToString(num_definitions - 1);
schema += ";\n";
flatbuffers::Parser parser;
// Will not compare against the original if we don't write defaults
parser.builder_.ForceDefaults(true);
// Parse the schema, parse the generated data, then generate text back
// from the binary and compare against the original.
TEST_EQ(parser.Parse(schema.c_str()), true);
const std::string &json =
definitions[num_definitions - 1].instances[0] + "\n";
TEST_EQ(parser.Parse(json.c_str()), true);
std::string jsongen;
parser.opts.indent_step = 0;
auto result =
GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
if (jsongen != json) {
// These strings are larger than a megabyte, so we show the bytes around
// the first bytes that are different rather than the whole string.
size_t len = std::min(json.length(), jsongen.length());
for (size_t i = 0; i < len; i++) {
if (json[i] != jsongen[i]) {
i -= std::min(static_cast<size_t>(10), i); // show some context;
size_t end = std::min(len, i + 20);
for (; i < end; i++)
TEST_OUTPUT_LINE("at %d: found \"%c\", expected \"%c\"\n",
static_cast<int>(i), jsongen[i], json[i]);
break;
}
}
    TEST_NOTNULL(nullptr); //-V501 (this comment suppresses CWE-570 warning)
}
// clang-format off
#ifdef FLATBUFFERS_TEST_VERBOSE
TEST_OUTPUT_LINE("%dk schema tested with %dk of json\n",
static_cast<int>(schema.length() / 1024),
static_cast<int>(json.length() / 1024));
#endif
// clang-format on
}
// Test that parser errors are actually generated.
void TestError_(const char *src, const char *error_substr, bool strict_json,
const char *file, int line, const char *func) {
flatbuffers::IDLOptions opts;
opts.strict_json = strict_json;
flatbuffers::Parser parser(opts);
if (parser.Parse(src)) {
TestFail("true", "false",
("parser.Parse(\"" + std::string(src) + "\")").c_str(), file, line,
func);
} else if (!strstr(parser.error_.c_str(), error_substr)) {
TestFail(error_substr, parser.error_.c_str(),
("parser.Parse(\"" + std::string(src) + "\")").c_str(), file, line,
func);
}
}
void TestError_(const char *src, const char *error_substr, const char *file,
int line, const char *func) {
TestError_(src, error_substr, false, file, line, func);
}
#ifdef _WIN32
# define TestError(src, ...) \
TestError_(src, __VA_ARGS__, __FILE__, __LINE__, __FUNCTION__)
#else
# define TestError(src, ...) \
TestError_(src, __VA_ARGS__, __FILE__, __LINE__, __PRETTY_FUNCTION__)
#endif
// Test that parsing errors occur as we'd expect.
// Also useful for coverage, making sure these paths are run.
void ErrorTest() {
// In order they appear in idl_parser.cpp
TestError("table X { Y:byte; } root_type X; { Y: 999 }", "does not fit");
TestError("\"\0", "illegal");
TestError("\"\\q", "escape code");
TestError("table ///", "documentation");
TestError("@", "illegal");
TestError("table 1", "expecting");
TestError("table X { Y:[[int]]; }", "nested vector");
TestError("table X { Y:1; }", "illegal type");
TestError("table X { Y:int; Y:int; }", "field already");
TestError("table Y {} table X { Y:int; }", "same as table");
TestError("struct X { Y:string; }", "only scalar");
TestError("struct X { a:uint = 42; }", "default values");
TestError("enum Y:byte { Z = 1 } table X { y:Y; }", "not part of enum");
TestError("struct X { Y:int (deprecated); }", "deprecate");
TestError("union Z { X } table X { Y:Z; } root_type X; { Y: {}, A:1 }",
"missing type field");
TestError("union Z { X } table X { Y:Z; } root_type X; { Y_type: 99, Y: {",
"type id");
TestError("table X { Y:int; } root_type X; { Z:", "unknown field");
TestError("table X { Y:int; } root_type X; { Y:", "string constant", true);
TestError("table X { Y:int; } root_type X; { \"Y\":1, }", "string constant",
true);
TestError(
"struct X { Y:int; Z:int; } table W { V:X; } root_type W; "
"{ V:{ Y:1 } }",
"wrong number");
TestError("enum E:byte { A } table X { Y:E; } root_type X; { Y:U }",
"unknown enum value");
TestError("table X { Y:byte; } root_type X; { Y:; }", "starting");
TestError("enum X:byte { Y } enum X {", "enum already");
TestError("enum X:float {}", "underlying");
TestError("enum X:byte { Y, Y }", "value already");
TestError("enum X:byte { Y=2, Z=2 }", "unique");
TestError("table X { Y:int; } table X {", "datatype already");
TestError("struct X (force_align: 7) { Y:int; }", "force_align");
TestError("struct X {}", "size 0");
TestError("{}", "no root");
TestError("table X { Y:byte; } root_type X; { Y:1 } { Y:1 }", "end of file");
TestError("table X { Y:byte; } root_type X; { Y:1 } table Y{ Z:int }",
"end of file");
TestError("root_type X;", "unknown root");
TestError("struct X { Y:int; } root_type X;", "a table");
TestError("union X { Y }", "referenced");
TestError("union Z { X } struct X { Y:int; }", "only tables");
TestError("table X { Y:[int]; YLength:int; }", "clash");
TestError("table X { Y:byte; } root_type X; { Y:1, Y:2 }", "more than once");
// float to integer conversion is forbidden
TestError("table X { Y:int; } root_type X; { Y:1.0 }", "float");
TestError("table X { Y:bool; } root_type X; { Y:1.0 }", "float");
TestError("enum X:bool { Y = true }", "must be integral");
// Array of non-scalar
TestError("table X { x:int; } struct Y { y:[X:2]; }",
"may contain only scalar or struct fields");
// Non-snake case field names
TestError("table X { Y: int; } root_type Y: {Y:1.0}", "snake_case");
// Complex defaults
TestError("table X { y: string = 1; }", "expecting: string");
TestError("table X { y: string = []; }", " Cannot assign token");
TestError("table X { y: [int] = [1]; }", "Expected `]`");
TestError("table X { y: [int] = [; }", "Expected `]`");
TestError("table X { y: [int] = \"\"; }", "type mismatch");
  // An identifier can't start with a sign (+|-).
TestError("table X { -Y: int; } root_type Y: {Y:1.0}", "identifier");
TestError("table X { +Y: int; } root_type Y: {Y:1.0}", "identifier");
}
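// Helper: build a one-field schema "table X { y:<type_name>; }" (plus optional
// extra declarations), parse the given JSON (or "{}" to check defaults), and
// return the value stored for the field.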
template<typename T>
T TestValue(const char *json, const char *type_name,
const char *decls = nullptr) {
flatbuffers::Parser parser;
parser.builder_.ForceDefaults(true); // return defaults
auto check_default = json ? false : true;
if (check_default) { parser.opts.output_default_scalars_in_json = true; }
// Simple schema.
std::string schema = std::string(decls ? decls : "") + "\n" +
"table X { y:" + std::string(type_name) +
"; } root_type X;";
auto schema_done = parser.Parse(schema.c_str());
TEST_EQ_STR(parser.error_.c_str(), "");
TEST_EQ(schema_done, true);
auto done = parser.Parse(check_default ? "{}" : json);
TEST_EQ_STR(parser.error_.c_str(), "");
TEST_EQ(done, true);
// Check with print.
std::string print_back;
parser.opts.indent_step = -1;
TEST_EQ(GenerateText(parser, parser.builder_.GetBufferPointer(), &print_back),
true);
// restore value from its default
if (check_default) { TEST_EQ(parser.Parse(print_back.c_str()), true); }
auto root = flatbuffers::GetRoot<flatbuffers::Table>(
parser.builder_.GetBufferPointer());
return root->GetField<T>(flatbuffers::FieldIndexToOffset(0), 0);
}
bool FloatCompare(float a, float b) { return fabs(a - b) < 0.001; }
// Additional parser testing not covered elsewhere.
void ValueTest() {
// Test scientific notation numbers.
TEST_EQ(
FloatCompare(TestValue<float>("{ y:0.0314159e+2 }", "float"), 3.14159f),
true);
// number in string
TEST_EQ(FloatCompare(TestValue<float>("{ y:\"0.0314159e+2\" }", "float"),
3.14159f),
true);
// Test conversion functions.
TEST_EQ(FloatCompare(TestValue<float>("{ y:cos(rad(180)) }", "float"), -1),
true);
  // int embedded in a string
TEST_EQ(TestValue<int>("{ y:\"-876\" }", "int=-123"), -876);
TEST_EQ(TestValue<int>("{ y:\"876\" }", "int=-123"), 876);
// Test negative hex constant.
TEST_EQ(TestValue<int>("{ y:-0x8ea0 }", "int=-0x8ea0"), -36512);
TEST_EQ(TestValue<int>(nullptr, "int=-0x8ea0"), -36512);
// positive hex constant
TEST_EQ(TestValue<int>("{ y:0x1abcdef }", "int=0x1"), 0x1abcdef);
// with optional '+' sign
TEST_EQ(TestValue<int>("{ y:+0x1abcdef }", "int=+0x1"), 0x1abcdef);
// hex in string
TEST_EQ(TestValue<int>("{ y:\"0x1abcdef\" }", "int=+0x1"), 0x1abcdef);
// Make sure we do unsigned 64bit correctly.
TEST_EQ(TestValue<uint64_t>("{ y:12335089644688340133 }", "ulong"),
12335089644688340133ULL);
// bool in string
TEST_EQ(TestValue<bool>("{ y:\"false\" }", "bool=true"), false);
TEST_EQ(TestValue<bool>("{ y:\"true\" }", "bool=\"true\""), true);
TEST_EQ(TestValue<bool>("{ y:'false' }", "bool=true"), false);
TEST_EQ(TestValue<bool>("{ y:'true' }", "bool=\"true\""), true);
// check comments before and after json object
TEST_EQ(TestValue<int>("/*before*/ { y:1 } /*after*/", "int"), 1);
TEST_EQ(TestValue<int>("//before \n { y:1 } //after", "int"), 1);
}
void NestedListTest() {
flatbuffers::Parser parser1;
TEST_EQ(parser1.Parse("struct Test { a:short; b:byte; } table T { F:[Test]; }"
"root_type T;"
"{ F:[ [10,20], [30,40]] }"),
true);
}
void EnumStringsTest() {
flatbuffers::Parser parser1;
TEST_EQ(parser1.Parse("enum E:byte { A, B, C } table T { F:[E]; }"
"root_type T;"
"{ F:[ A, B, \"C\", \"A B C\" ] }"),
true);
flatbuffers::Parser parser2;
TEST_EQ(parser2.Parse("enum E:byte { A, B, C } table T { F:[int]; }"
"root_type T;"
"{ F:[ \"E.C\", \"E.A E.B E.C\" ] }"),
true);
// unsigned bit_flags
flatbuffers::Parser parser3;
TEST_EQ(
parser3.Parse("enum E:uint16 (bit_flags) { F0, F07=7, F08, F14=14, F15 }"
" table T { F: E = \"F15 F08\"; }"
"root_type T;"),
true);
}
void EnumNamesTest() {
TEST_EQ_STR("Red", EnumNameColor(Color_Red));
TEST_EQ_STR("Green", EnumNameColor(Color_Green));
TEST_EQ_STR("Blue", EnumNameColor(Color_Blue));
// Check that Color-to-string conversion doesn't crash on a mixture of Colors.
// 1) Example::Color enum is enum with unfixed underlying type.
// 2) Valid enum range: [0; 2^(ceil(log2(Color_ANY))) - 1].
// Consequence: a value out of this range leads to UB (since C++17).
// For details see C++17 standard or explanation on the SO:
// stackoverflow.com/questions/18195312/what-happens-if-you-static-cast-invalid-value-to-enum-class
TEST_EQ_STR("", EnumNameColor(static_cast<Color>(0)));
TEST_EQ_STR("", EnumNameColor(static_cast<Color>(Color_ANY - 1)));
TEST_EQ_STR("", EnumNameColor(static_cast<Color>(Color_ANY + 1)));
}
void EnumOutOfRangeTest() {
TestError("enum X:byte { Y = 128 }", "enum value does not fit");
TestError("enum X:byte { Y = -129 }", "enum value does not fit");
TestError("enum X:byte { Y = 126, Z0, Z1 }", "enum value does not fit");
TestError("enum X:ubyte { Y = -1 }", "enum value does not fit");
TestError("enum X:ubyte { Y = 256 }", "enum value does not fit");
TestError("enum X:ubyte { Y = 255, Z }", "enum value does not fit");
TestError("table Y{} union X { Y = -1 }", "enum value does not fit");
TestError("table Y{} union X { Y = 256 }", "enum value does not fit");
TestError("table Y{} union X { Y = 255, Z:Y }", "enum value does not fit");
TestError("enum X:int { Y = -2147483649 }", "enum value does not fit");
TestError("enum X:int { Y = 2147483648 }", "enum value does not fit");
TestError("enum X:uint { Y = -1 }", "enum value does not fit");
TestError("enum X:uint { Y = 4294967297 }", "enum value does not fit");
TestError("enum X:long { Y = 9223372036854775808 }", "does not fit");
TestError("enum X:long { Y = 9223372036854775807, Z }",
"enum value does not fit");
TestError("enum X:ulong { Y = -1 }", "does not fit");
TestError("enum X:ubyte (bit_flags) { Y=8 }", "bit flag out");
TestError("enum X:byte (bit_flags) { Y=7 }", "must be unsigned"); // -128
// bit_flags out of range
TestError("enum X:ubyte (bit_flags) { Y0,Y1,Y2,Y3,Y4,Y5,Y6,Y7,Y8 }",
"out of range");
}
void EnumValueTest() {
// json: "{ y:0 }", schema: table X { y:E; }
// If 0 is a member of enum E (V=0), then y:0 is valid.
TEST_EQ(TestValue<int>("{ y:0 }", "E", "enum E:int { V }"), 0);
TEST_EQ(TestValue<int>("{ y:V }", "E", "enum E:int { V }"), 0);
// The default value of y is 0.
TEST_EQ(TestValue<int>("{ }", "E", "enum E:int { V }"), 0);
TEST_EQ(TestValue<int>("{ y:5 }", "E=V", "enum E:int { V=5 }"), 5);
// Generate json with defaults and check.
TEST_EQ(TestValue<int>(nullptr, "E=V", "enum E:int { V=5 }"), 5);
// 5 in enum
TEST_EQ(TestValue<int>("{ y:5 }", "E", "enum E:int { Z, V=5 }"), 5);
TEST_EQ(TestValue<int>("{ y:5 }", "E=V", "enum E:int { Z, V=5 }"), 5);
// Generate json with defaults and check.
TEST_EQ(TestValue<int>(nullptr, "E", "enum E:int { Z, V=5 }"), 0);
TEST_EQ(TestValue<int>(nullptr, "E=V", "enum E:int { Z, V=5 }"), 5);
// uint64 test
TEST_EQ(TestValue<uint64_t>(nullptr, "E=V",
"enum E:ulong { V = 13835058055282163712 }"),
13835058055282163712ULL);
TEST_EQ(TestValue<uint64_t>(nullptr, "E=V",
"enum E:ulong { V = 18446744073709551615 }"),
18446744073709551615ULL);
// Assign a non-enum value to an enum field; the parser currently accepts it.
TEST_EQ(TestValue<int>("{ y:7 }", "E", "enum E:int { V = 0 }"), 7);
// Check that non-ascending values are valid.
TEST_EQ(TestValue<int>("{ y:5 }", "E=V", "enum E:int { Z=10, V=5 }"), 5);
}
void IntegerOutOfRangeTest() {
TestError("table T { F:byte; } root_type T; { F:128 }",
"constant does not fit");
TestError("table T { F:byte; } root_type T; { F:-129 }",
"constant does not fit");
TestError("table T { F:ubyte; } root_type T; { F:256 }",
"constant does not fit");
TestError("table T { F:ubyte; } root_type T; { F:-1 }",
"constant does not fit");
TestError("table T { F:short; } root_type T; { F:32768 }",
"constant does not fit");
TestError("table T { F:short; } root_type T; { F:-32769 }",
"constant does not fit");
TestError("table T { F:ushort; } root_type T; { F:65536 }",
"constant does not fit");
TestError("table T { F:ushort; } root_type T; { F:-1 }",
"constant does not fit");
TestError("table T { F:int; } root_type T; { F:2147483648 }",
"constant does not fit");
TestError("table T { F:int; } root_type T; { F:-2147483649 }",
"constant does not fit");
TestError("table T { F:uint; } root_type T; { F:4294967296 }",
"constant does not fit");
TestError("table T { F:uint; } root_type T; { F:-1 }",
"constant does not fit");
// Check fixed width aliases
TestError("table X { Y:uint8; } root_type X; { Y: -1 }", "does not fit");
TestError("table X { Y:uint8; } root_type X; { Y: 256 }", "does not fit");
TestError("table X { Y:uint16; } root_type X; { Y: -1 }", "does not fit");
TestError("table X { Y:uint16; } root_type X; { Y: 65536 }", "does not fit");
TestError("table X { Y:uint32; } root_type X; { Y: -1 }", "");
TestError("table X { Y:uint32; } root_type X; { Y: 4294967296 }",
"does not fit");
TestError("table X { Y:uint64; } root_type X; { Y: -1 }", "");
TestError("table X { Y:uint64; } root_type X; { Y: -9223372036854775809 }",
"does not fit");
TestError("table X { Y:uint64; } root_type X; { Y: 18446744073709551616 }",
"does not fit");
TestError("table X { Y:int8; } root_type X; { Y: -129 }", "does not fit");
TestError("table X { Y:int8; } root_type X; { Y: 128 }", "does not fit");
TestError("table X { Y:int16; } root_type X; { Y: -32769 }", "does not fit");
TestError("table X { Y:int16; } root_type X; { Y: 32768 }", "does not fit");
TestError("table X { Y:int32; } root_type X; { Y: -2147483649 }", "");
TestError("table X { Y:int32; } root_type X; { Y: 2147483648 }",
"does not fit");
TestError("table X { Y:int64; } root_type X; { Y: -9223372036854775809 }",
"does not fit");
TestError("table X { Y:int64; } root_type X; { Y: 9223372036854775808 }",
"does not fit");
// check out-of-int64 as int8
TestError("table X { Y:int8; } root_type X; { Y: -9223372036854775809 }",
"does not fit");
TestError("table X { Y:int8; } root_type X; { Y: 9223372036854775808 }",
"does not fit");
// Check default values
TestError("table X { Y:int64=-9223372036854775809; } root_type X; {}",
"does not fit");
TestError("table X { Y:int64= 9223372036854775808; } root_type X; {}",
"does not fit");
TestError("table X { Y:uint64; } root_type X; { Y: -1 }", "");
TestError("table X { Y:uint64=-9223372036854775809; } root_type X; {}",
"does not fit");
TestError("table X { Y:uint64= 18446744073709551616; } root_type X; {}",
"does not fit");
}
void IntegerBoundaryTest() {
// Check numerical compatibility with non-C++ languages.
// By the C++ standard, std::numeric_limits<int64_t>::min() is only required
// to be -9223372036854775807 (-2^63+1) or less. The FlatBuffers grammar and
// most of the languages (C#, Java, Rust) expect that the minimum values are
// -128, -32768, ..., -9223372036854775808. Since C++20,
// static_cast<int64_t>(0x8000000000000000ULL) is a well-defined two's
// complement cast, therefore -9223372036854775808 should be a valid negative
// value.
TEST_EQ(flatbuffers::numeric_limits<int8_t>::min(), -128);
TEST_EQ(flatbuffers::numeric_limits<int8_t>::max(), 127);
TEST_EQ(flatbuffers::numeric_limits<int16_t>::min(), -32768);
TEST_EQ(flatbuffers::numeric_limits<int16_t>::max(), 32767);
TEST_EQ(flatbuffers::numeric_limits<int32_t>::min() + 1, -2147483647);
TEST_EQ(flatbuffers::numeric_limits<int32_t>::max(), 2147483647ULL);
TEST_EQ(flatbuffers::numeric_limits<int64_t>::min() + 1LL,
-9223372036854775807LL);
TEST_EQ(flatbuffers::numeric_limits<int64_t>::max(), 9223372036854775807ULL);
TEST_EQ(flatbuffers::numeric_limits<uint8_t>::max(), 255);
TEST_EQ(flatbuffers::numeric_limits<uint16_t>::max(), 65535);
TEST_EQ(flatbuffers::numeric_limits<uint32_t>::max(), 4294967295ULL);
TEST_EQ(flatbuffers::numeric_limits<uint64_t>::max(),
18446744073709551615ULL);
TEST_EQ(TestValue<int8_t>("{ y:127 }", "byte"), 127);
TEST_EQ(TestValue<int8_t>("{ y:-128 }", "byte"), -128);
TEST_EQ(TestValue<uint8_t>("{ y:255 }", "ubyte"), 255);
TEST_EQ(TestValue<uint8_t>("{ y:0 }", "ubyte"), 0);
TEST_EQ(TestValue<int16_t>("{ y:32767 }", "short"), 32767);
TEST_EQ(TestValue<int16_t>("{ y:-32768 }", "short"), -32768);
TEST_EQ(TestValue<uint16_t>("{ y:65535 }", "ushort"), 65535);
TEST_EQ(TestValue<uint16_t>("{ y:0 }", "ushort"), 0);
TEST_EQ(TestValue<int32_t>("{ y:2147483647 }", "int"), 2147483647);
TEST_EQ(TestValue<int32_t>("{ y:-2147483648 }", "int") + 1, -2147483647);
TEST_EQ(TestValue<uint32_t>("{ y:4294967295 }", "uint"), 4294967295);
TEST_EQ(TestValue<uint32_t>("{ y:0 }", "uint"), 0);
TEST_EQ(TestValue<int64_t>("{ y:9223372036854775807 }", "long"),
9223372036854775807LL);
TEST_EQ(TestValue<int64_t>("{ y:-9223372036854775808 }", "long") + 1LL,
-9223372036854775807LL);
TEST_EQ(TestValue<uint64_t>("{ y:18446744073709551615 }", "ulong"),
18446744073709551615ULL);
TEST_EQ(TestValue<uint64_t>("{ y:0 }", "ulong"), 0);
TEST_EQ(TestValue<uint64_t>("{ y: 18446744073709551615 }", "uint64"),
18446744073709551615ULL);
// check that the default works
TEST_EQ(TestValue<uint64_t>(nullptr, "uint64 = 18446744073709551615"),
18446744073709551615ULL);
}
void ValidFloatTest() {
// check rounding to infinity
TEST_EQ(TestValue<float>("{ y:+3.4029e+38 }", "float"), +infinity_f);
TEST_EQ(TestValue<float>("{ y:-3.4029e+38 }", "float"), -infinity_f);
TEST_EQ(TestValue<double>("{ y:+1.7977e+308 }", "double"), +infinity_d);
TEST_EQ(TestValue<double>("{ y:-1.7977e+308 }", "double"), -infinity_d);
TEST_EQ(
FloatCompare(TestValue<float>("{ y:0.0314159e+2 }", "float"), 3.14159f),
true);
// float in string
TEST_EQ(FloatCompare(TestValue<float>("{ y:\" 0.0314159e+2 \" }", "float"),
3.14159f),
true);
TEST_EQ(TestValue<float>("{ y:1 }", "float"), 1.0f);
TEST_EQ(TestValue<float>("{ y:1.0 }", "float"), 1.0f);
TEST_EQ(TestValue<float>("{ y:1. }", "float"), 1.0f);
TEST_EQ(TestValue<float>("{ y:+1. }", "float"), 1.0f);
TEST_EQ(TestValue<float>("{ y:-1. }", "float"), -1.0f);
TEST_EQ(TestValue<float>("{ y:1.e0 }", "float"), 1.0f);
TEST_EQ(TestValue<float>("{ y:1.e+0 }", "float"), 1.0f);
TEST_EQ(TestValue<float>("{ y:1.e-0 }", "float"), 1.0f);
TEST_EQ(TestValue<float>("{ y:0.125 }", "float"), 0.125f);
TEST_EQ(TestValue<float>("{ y:.125 }", "float"), 0.125f);
TEST_EQ(TestValue<float>("{ y:-.125 }", "float"), -0.125f);
TEST_EQ(TestValue<float>("{ y:+.125 }", "float"), +0.125f);
TEST_EQ(TestValue<float>("{ y:5 }", "float"), 5.0f);
TEST_EQ(TestValue<float>("{ y:\"5\" }", "float"), 5.0f);
#if defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
// Old MSVC versions may have a problem with this check.
// https://www.exploringbinary.com/visual-c-plus-plus-strtod-still-broken/
TEST_EQ(TestValue<double>("{ y:6.9294956446009195e15 }", "double"),
6929495644600920.0);
// check NaNs
TEST_EQ(std::isnan(TestValue<double>("{ y:nan }", "double")), true);
TEST_EQ(std::isnan(TestValue<float>("{ y:nan }", "float")), true);
TEST_EQ(std::isnan(TestValue<float>("{ y:\"nan\" }", "float")), true);
TEST_EQ(std::isnan(TestValue<float>("{ y:\"+nan\" }", "float")), true);
TEST_EQ(std::isnan(TestValue<float>("{ y:\"-nan\" }", "float")), true);
TEST_EQ(std::isnan(TestValue<float>("{ y:+nan }", "float")), true);
TEST_EQ(std::isnan(TestValue<float>("{ y:-nan }", "float")), true);
TEST_EQ(std::isnan(TestValue<float>(nullptr, "float=nan")), true);
TEST_EQ(std::isnan(TestValue<float>(nullptr, "float=-nan")), true);
// check inf
TEST_EQ(TestValue<float>("{ y:inf }", "float"), infinity_f);
TEST_EQ(TestValue<float>("{ y:\"inf\" }", "float"), infinity_f);
TEST_EQ(TestValue<float>("{ y:\"-inf\" }", "float"), -infinity_f);
TEST_EQ(TestValue<float>("{ y:\"+inf\" }", "float"), infinity_f);
TEST_EQ(TestValue<float>("{ y:+inf }", "float"), infinity_f);
TEST_EQ(TestValue<float>("{ y:-inf }", "float"), -infinity_f);
TEST_EQ(TestValue<float>(nullptr, "float=inf"), infinity_f);
TEST_EQ(TestValue<float>(nullptr, "float=-inf"), -infinity_f);
TestValue<double>(
"{ y: [0.2, .2, 1.0, -1.0, -2., 2., 1e0, -1e0, 1.0e0, -1.0e0, -3.e2, "
"3.0e2] }",
"[double]");
TestValue<float>(
"{ y: [0.2, .2, 1.0, -1.0, -2., 2., 1e0, -1e0, 1.0e0, -1.0e0, -3.e2, "
"3.0e2] }",
"[float]");
// Test the binary (hexadecimal) format of floating point literals.
// https://en.cppreference.com/w/cpp/language/floating_literal
// 0x12.34p-1 = (1*16^1 + 2*16^0 + 3*16^-1 + 4*16^-2) * 2^-1 = 9.1015625
TEST_EQ(TestValue<double>("{ y:0x12.34p-1 }", "double"), 9.1015625);
TEST_EQ(TestValue<float>("{ y:-0x0.2p0 }", "float"), -0.125f);
TEST_EQ(TestValue<float>("{ y:-0x.2p1 }", "float"), -0.25f);
// hex fraction 1.2 (decimal 1.125) scaled by 2^3, that is 9.0
TEST_EQ(TestValue<float>("{ y:0x1.2p3 }", "float"), 9.0f);
TEST_EQ(TestValue<float>("{ y:0x10.1p0 }", "float"), 16.0625f);
TEST_EQ(TestValue<double>("{ y:0x1.2p3 }", "double"), 9.0);
TEST_EQ(TestValue<double>("{ y:0x10.1p0 }", "double"), 16.0625);
TEST_EQ(TestValue<double>("{ y:0xC.68p+2 }", "double"), 49.625);
TestValue<double>("{ y: [0x20.4ep1, +0x20.4ep1, -0x20.4ep1] }", "[double]");
TestValue<float>("{ y: [0x20.4ep1, +0x20.4ep1, -0x20.4ep1] }", "[float]");
#else // FLATBUFFERS_HAS_NEW_STRTOD
TEST_OUTPUT_LINE("FLATBUFFERS_HAS_NEW_STRTOD tests skipped");
#endif // !FLATBUFFERS_HAS_NEW_STRTOD
}
void InvalidFloatTest() {
auto invalid_msg = "invalid number";
auto comma_msg = "expecting: ,";
TestError("table T { F:float; } root_type T; { F:1,0 }", "");
TestError("table T { F:float; } root_type T; { F:. }", "");
TestError("table T { F:float; } root_type T; { F:- }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:+ }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:-. }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:+. }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:.e }", "");
TestError("table T { F:float; } root_type T; { F:-e }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:+e }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:-.e }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:+.e }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:-e1 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:+e1 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:1.0e+ }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:1.0e- }", invalid_msg);
// exponent pP is mandatory for hex-float
TestError("table T { F:float; } root_type T; { F:0x0 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:-0x. }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x. }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0Xe }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:\"0Xe\" }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:\"nan(1)\" }", invalid_msg);
// eE is not an exponent marker in a hex-float!
TestError("table T { F:float; } root_type T; { F:0x0.0e+ }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x0.0e- }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x0.0p }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x0.0p+ }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x0.0p- }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x0.0pa1 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x0.0e+ }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x0.0e- }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x0.0e+0 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x0.0e-0 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x0.0ep+ }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:0x0.0ep- }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:1.2.3 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:1.2.e3 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:1.2e.3 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:1.2e0.3 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:1.2e3. }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:1.2e3.0 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:+-1.0 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:1.0e+-1 }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:\"1.0e+-1\" }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:1.e0e }", comma_msg);
TestError("table T { F:float; } root_type T; { F:0x1.p0e }", comma_msg);
TestError("table T { F:float; } root_type T; { F:\" 0x10 \" }", invalid_msg);
// floats in string
TestError("table T { F:float; } root_type T; { F:\"1,2.\" }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:\"1.2e3.\" }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:\"0x1.p0e\" }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:\"0x1.0\" }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:\" 0x1.0\" }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:\"+ 0\" }", invalid_msg);
// disable escapes for "number-in-string"
TestError("table T { F:float; } root_type T; { F:\"\\f1.2e3.\" }", "invalid");
TestError("table T { F:float; } root_type T; { F:\"\\t1.2e3.\" }", "invalid");
TestError("table T { F:float; } root_type T; { F:\"\\n1.2e3.\" }", "invalid");
TestError("table T { F:float; } root_type T; { F:\"\\r1.2e3.\" }", "invalid");
TestError("table T { F:float; } root_type T; { F:\"4\\x005\" }", "invalid");
TestError("table T { F:float; } root_type T; { F:\"\'12\'\" }", invalid_msg);
// null is not a number constant!
TestError("table T { F:float; } root_type T; { F:\"null\" }", invalid_msg);
TestError("table T { F:float; } root_type T; { F:null }", invalid_msg);
}
void GenerateTableTextTest() {
std::string schemafile;
std::string jsonfile;
bool ok =
flatbuffers::LoadFile((test_data_path + "monster_test.fbs").c_str(),
false, &schemafile) &&
flatbuffers::LoadFile((test_data_path + "monsterdata_test.json").c_str(),
false, &jsonfile);
TEST_EQ(ok, true);
auto include_test_path =
flatbuffers::ConCatPathFileName(test_data_path, "include_test");
const char *include_directories[] = { test_data_path.c_str(),
include_test_path.c_str(), nullptr };
flatbuffers::IDLOptions opt;
opt.indent_step = -1;
flatbuffers::Parser parser(opt);
ok = parser.Parse(schemafile.c_str(), include_directories) &&
parser.Parse(jsonfile.c_str(), include_directories);
TEST_EQ(ok, true);
// Test root table
const Monster *monster = GetMonster(parser.builder_.GetBufferPointer());
const auto abilities = monster->testarrayofsortedstruct();
TEST_EQ(abilities->size(), 3);
TEST_EQ(abilities->Get(0)->id(), 0);
TEST_EQ(abilities->Get(0)->distance(), 45);
TEST_EQ(abilities->Get(1)->id(), 1);
TEST_EQ(abilities->Get(1)->distance(), 21);
TEST_EQ(abilities->Get(2)->id(), 5);
TEST_EQ(abilities->Get(2)->distance(), 12);
std::string jsongen;
auto result = GenerateTextFromTable(parser, monster, "MyGame.Example.Monster",
&jsongen);
TEST_EQ(result, true);
// Test sub table
const Vec3 *pos = monster->pos();
jsongen.clear();
result = GenerateTextFromTable(parser, pos, "MyGame.Example.Vec3", &jsongen);
TEST_EQ(result, true);
TEST_EQ_STR(
jsongen.c_str(),
"{x: 1.0,y: 2.0,z: 3.0,test1: 3.0,test2: \"Green\",test3: {a: 5,b: 6}}");
const Test &test3 = pos->test3();
jsongen.clear();
result =
GenerateTextFromTable(parser, &test3, "MyGame.Example.Test", &jsongen);
TEST_EQ(result, true);
TEST_EQ_STR(jsongen.c_str(), "{a: 5,b: 6}");
const Test *test4 = monster->test4()->Get(0);
jsongen.clear();
result =
GenerateTextFromTable(parser, test4, "MyGame.Example.Test", &jsongen);
TEST_EQ(result, true);
TEST_EQ_STR(jsongen.c_str(), "{a: 10,b: 20}");
}
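// Checks StringToNumber overflow handling for integer types: malformed input
// ("1q") fails and yields 0, values above the upper bound fail and clamp to
// max(), and values below the lower bound fail and clamp to lowest() (max()
// for unsigned types).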
template<typename T>
void NumericUtilsTestInteger(const char *lower, const char *upper) {
T x;
TEST_EQ(flatbuffers::StringToNumber("1q", &x), false);
TEST_EQ(x, 0);
TEST_EQ(flatbuffers::StringToNumber(upper, &x), false);
TEST_EQ(x, flatbuffers::numeric_limits<T>::max());
TEST_EQ(flatbuffers::StringToNumber(lower, &x), false);
auto expval = flatbuffers::is_unsigned<T>::value
? flatbuffers::numeric_limits<T>::max()
: flatbuffers::numeric_limits<T>::lowest();
TEST_EQ(x, expval);
}
template<typename T>
void NumericUtilsTestFloat(const char *lower, const char *upper) {
T f;
TEST_EQ(flatbuffers::StringToNumber("", &f), false);
TEST_EQ(flatbuffers::StringToNumber("1q", &f), false);
TEST_EQ(f, 0);
TEST_EQ(flatbuffers::StringToNumber(upper, &f), true);
TEST_EQ(f, +flatbuffers::numeric_limits<T>::infinity());
TEST_EQ(flatbuffers::StringToNumber(lower, &f), true);
TEST_EQ(f, -flatbuffers::numeric_limits<T>::infinity());
}
void NumericUtilsTest() {
NumericUtilsTestInteger<uint64_t>("-1", "18446744073709551616");
NumericUtilsTestInteger<uint8_t>("-1", "256");
NumericUtilsTestInteger<int64_t>("-9223372036854775809",
"9223372036854775808");
NumericUtilsTestInteger<int8_t>("-129", "128");
NumericUtilsTestFloat<float>("-3.4029e+38", "+3.4029e+38");
NumericUtilsTestFloat<float>("-1.7977e+308", "+1.7977e+308");
}
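// Sweeps every char value from -128 to 127 and checks the locale-independent
// ASCII classification helpers against reference range comparisons.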
void IsAsciiUtilsTest() {
char c = -128;
for (int cnt = 0; cnt < 256; cnt++) {
auto alpha = (('a' <= c) && (c <= 'z')) || (('A' <= c) && (c <= 'Z'));
auto dec = (('0' <= c) && (c <= '9'));
auto hex = (('a' <= c) && (c <= 'f')) || (('A' <= c) && (c <= 'F'));
TEST_EQ(flatbuffers::is_alpha(c), alpha);
TEST_EQ(flatbuffers::is_alnum(c), alpha || dec);
TEST_EQ(flatbuffers::is_digit(c), dec);
TEST_EQ(flatbuffers::is_xdigit(c), dec || hex);
c += 1;
}
}
void UnicodeTest() {
flatbuffers::Parser parser;
// Without setting allow_non_utf8 = true, we treat \x sequences as byte
// sequences which are then validated as UTF-8.
TEST_EQ(parser.Parse("table T { F:string; }"
"root_type T;"
"{ F:\"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC"
"\\u5225\\u30B5\\u30A4\\u30C8\\xE2\\x82\\xAC\\u0080\\uD8"
"3D\\uDE0E\" }"),
true);
std::string jsongen;
parser.opts.indent_step = -1;
auto result =
GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
TEST_EQ_STR(jsongen.c_str(),
"{F: \"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC"
"\\u5225\\u30B5\\u30A4\\u30C8\\u20AC\\u0080\\uD83D\\uDE0E\"}");
}
void UnicodeTestAllowNonUTF8() {
flatbuffers::Parser parser;
parser.opts.allow_non_utf8 = true;
TEST_EQ(
parser.Parse(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC"
"\\u5225\\u30B5\\u30A4\\u30C8\\x01\\x80\\u0080\\uD83D\\uDE0E\" }"),
true);
std::string jsongen;
parser.opts.indent_step = -1;
auto result =
GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
TEST_EQ_STR(
jsongen.c_str(),
"{F: \"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC"
"\\u5225\\u30B5\\u30A4\\u30C8\\u0001\\x80\\u0080\\uD83D\\uDE0E\"}");
}
void UnicodeTestGenerateTextFailsOnNonUTF8() {
flatbuffers::Parser parser;
// Allow non-UTF-8 initially to model what happens when we load a binary
// flatbuffer from disk which contains non-UTF-8 strings.
parser.opts.allow_non_utf8 = true;
TEST_EQ(
parser.Parse(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\u20AC\\u00A2\\u30E6\\u30FC\\u30B6\\u30FC"
"\\u5225\\u30B5\\u30A4\\u30C8\\x01\\x80\\u0080\\uD83D\\uDE0E\" }"),
true);
std::string jsongen;
parser.opts.indent_step = -1;
// Now, disallow non-UTF-8 (the default behavior) so GenerateText indicates
// failure.
parser.opts.allow_non_utf8 = false;
auto result =
GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, false);
}
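// The surrogate pair \uD83D\uDCA9 decodes to U+1F4A9:
// ((0xD83D - 0xD800) << 10) + (0xDCA9 - 0xDC00) + 0x10000 = 0x1F4A9,
// which is encoded in UTF-8 as F0 9F 92 A9, as asserted below.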
void UnicodeSurrogatesTest() {
flatbuffers::Parser parser;
TEST_EQ(parser.Parse("table T { F:string (id: 0); }"
"root_type T;"
"{ F:\"\\uD83D\\uDCA9\"}"),
true);
auto root = flatbuffers::GetRoot<flatbuffers::Table>(
parser.builder_.GetBufferPointer());
auto string = root->GetPointer<flatbuffers::String *>(
flatbuffers::FieldIndexToOffset(0));
TEST_EQ_STR(string->c_str(), "\xF0\x9F\x92\xA9");
}
void UnicodeInvalidSurrogatesTest() {
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\uD800\"}",
"unpaired high surrogate");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\uD800abcd\"}",
"unpaired high surrogate");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\uD800\\n\"}",
"unpaired high surrogate");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\uD800\\uD800\"}",
"multiple high surrogates");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\\uDC00\"}",
"unpaired low surrogate");
}
void InvalidUTF8Test() {
// "1 byte" pattern, under min length of 2 bytes
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\x80\"}",
"illegal UTF-8 sequence");
// 2 byte pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xDF\"}",
"illegal UTF-8 sequence");
// 3 byte pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xEF\xBF\"}",
"illegal UTF-8 sequence");
// 4 byte pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xF7\xBF\xBF\"}",
"illegal UTF-8 sequence");
// "5 byte" pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFB\xBF\xBF\xBF\"}",
"illegal UTF-8 sequence");
// "6 byte" pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFD\xBF\xBF\xBF\xBF\"}",
"illegal UTF-8 sequence");
// "7 byte" pattern, string too short
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFE\xBF\xBF\xBF\xBF\xBF\"}",
"illegal UTF-8 sequence");
// "5 byte" pattern, over max length of 4 bytes
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFB\xBF\xBF\xBF\xBF\"}",
"illegal UTF-8 sequence");
// "6 byte" pattern, over max length of 4 bytes
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFD\xBF\xBF\xBF\xBF\xBF\"}",
"illegal UTF-8 sequence");
// "7 byte" pattern, over max length of 4 bytes
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xFE\xBF\xBF\xBF\xBF\xBF\xBF\"}",
"illegal UTF-8 sequence");
// Three invalid encodings for U+000A (\n, aka NEWLINE)
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xC0\x8A\"}",
"illegal UTF-8 sequence");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xE0\x80\x8A\"}",
"illegal UTF-8 sequence");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xF0\x80\x80\x8A\"}",
"illegal UTF-8 sequence");
// Two invalid encodings for U+00A9 (COPYRIGHT SYMBOL)
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xE0\x81\xA9\"}",
"illegal UTF-8 sequence");
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xF0\x80\x81\xA9\"}",
"illegal UTF-8 sequence");
// Invalid encoding for U+20AC (EURO SYMBOL)
TestError(
"table T { F:string; }"
"root_type T;"
"{ F:\"\xF0\x82\x82\xAC\"}",
"illegal UTF-8 sequence");
// UTF-16 surrogate values between U+D800 and U+DFFF cannot be encoded in
// UTF-8
TestError(
"table T { F:string; }"
"root_type T;"
// U+10400 "encoded" as U+D801 U+DC00
"{ F:\"\xED\xA0\x81\xED\xB0\x80\"}",
"illegal UTF-8 sequence");
// Check that identifier parsing does not depend on the locale.
std::string locale_ident;
locale_ident += "table T { F";
locale_ident += static_cast<char>(-32); // unsigned 0xE0
locale_ident += " :string; }";
locale_ident += "root_type T;";
locale_ident += "{}";
TestError(locale_ident.c_str(), "");
}
void UnknownFieldsTest() {
flatbuffers::IDLOptions opts;
opts.skip_unexpected_fields_in_json = true;
flatbuffers::Parser parser(opts);
TEST_EQ(parser.Parse("table T { str:string; i:int;}"
"root_type T;"
"{ str:\"test\","
"unknown_string:\"test\","
"\"unknown_string\":\"test\","
"unknown_int:10,"
"unknown_float:1.0,"
"unknown_array: [ 1, 2, 3, 4],"
"unknown_object: { i: 10 },"
"\"unknown_object\": { \"i\": 10 },"
"i:10}"),
true);
std::string jsongen;
parser.opts.indent_step = -1;
auto result =
GenerateText(parser, parser.builder_.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
TEST_EQ_STR(jsongen.c_str(), "{str: \"test\",i: 10}");
}
void ParseUnionTest() {
// Unions must be parseable with the type field following the object.
flatbuffers::Parser parser;
TEST_EQ(parser.Parse("table T { A:int; }"
"union U { T }"
"table V { X:U; }"
"root_type V;"
"{ X:{ A:1 }, X_type: T }"),
true);
// Unions must also be parsable with a prefixed namespace.
flatbuffers::Parser parser2;
TEST_EQ(parser2.Parse("namespace N; table A {} namespace; union U { N.A }"
"table B { e:U; } root_type B;"
"{ e_type: N_A, e: {} }"),
true);
}
void InvalidNestedFlatbufferTest() {
// First, load and parse FlatBuffer schema (.fbs)
std::string schemafile;
TEST_EQ(flatbuffers::LoadFile((test_data_path + "monster_test.fbs").c_str(),
false, &schemafile),
true);
auto include_test_path =
flatbuffers::ConCatPathFileName(test_data_path, "include_test");
const char *include_directories[] = { test_data_path.c_str(),
include_test_path.c_str(), nullptr };
flatbuffers::Parser parser1;
TEST_EQ(parser1.Parse(schemafile.c_str(), include_directories), true);
// "color" inside nested flatbuffer contains invalid enum value
TEST_EQ(parser1.Parse("{ name: \"Bender\", testnestedflatbuffer: { name: "
"\"Leela\", color: \"nonexistent\"}}"),
false);
}
void EvolutionTest() {
// VS10 does not support typed enums; exclude from tests.
#if !defined(_MSC_VER) || _MSC_VER >= 1700
const int NUM_VERSIONS = 2;
std::string schemas[NUM_VERSIONS];
std::string jsonfiles[NUM_VERSIONS];
std::vector<uint8_t> binaries[NUM_VERSIONS];
flatbuffers::IDLOptions idl_opts;
idl_opts.lang_to_generate |= flatbuffers::IDLOptions::kBinary;
flatbuffers::Parser parser(idl_opts);
// Load all the schema versions and their associated data.
for (int i = 0; i < NUM_VERSIONS; ++i) {
std::string schema = test_data_path + "evolution_test/evolution_v" +
flatbuffers::NumToString(i + 1) + ".fbs";
TEST_ASSERT(flatbuffers::LoadFile(schema.c_str(), false, &schemas[i]));
std::string json = test_data_path + "evolution_test/evolution_v" +
flatbuffers::NumToString(i + 1) + ".json";
TEST_ASSERT(flatbuffers::LoadFile(json.c_str(), false, &jsonfiles[i]));
TEST_ASSERT(parser.Parse(schemas[i].c_str()));
TEST_ASSERT(parser.Parse(jsonfiles[i].c_str()));
auto bufLen = parser.builder_.GetSize();
auto buf = parser.builder_.GetBufferPointer();
binaries[i].reserve(bufLen);
std::copy(buf, buf + bufLen, std::back_inserter(binaries[i]));
}
// Assert that all the verifiers for the different schema versions properly
// verify any version data.
for (int i = 0; i < NUM_VERSIONS; ++i) {
flatbuffers::Verifier verifier(&binaries[i].front(), binaries[i].size());
TEST_ASSERT(Evolution::V1::VerifyRootBuffer(verifier));
TEST_ASSERT(Evolution::V2::VerifyRootBuffer(verifier));
}
// Test backwards compatibility by reading old data with an evolved schema.
auto root_v1_viewed_from_v2 = Evolution::V2::GetRoot(&binaries[0].front());
// field 'k' is new in version 2, so it should be null.
TEST_ASSERT(nullptr == root_v1_viewed_from_v2->k());
// field 'l' is new in version 2 with a default of 56.
TEST_EQ(root_v1_viewed_from_v2->l(), 56);
// field 'c' of 'TableA' is new in version 2, so it should be null.
TEST_ASSERT(nullptr == root_v1_viewed_from_v2->e()->c());
// 'TableC' was added to field 'c' union in version 2, so it should be null.
TEST_ASSERT(nullptr == root_v1_viewed_from_v2->c_as_TableC());
// The field 'c' union should be of type 'TableB' regardless of schema version
TEST_ASSERT(root_v1_viewed_from_v2->c_type() == Evolution::V2::Union::TableB);
// The field 'f' was renamed to 'ff' in version 2, it should still be
// readable.
TEST_EQ(root_v1_viewed_from_v2->ff()->a(), 16);
// Test forwards compatibility by reading new data with an old schema.
auto root_v2_viewed_from_v1 = Evolution::V1::GetRoot(&binaries[1].front());
// The field 'c' union in version 2 is a new table (index = 3) and should
// still be accessible, but not interpretable.
TEST_EQ(static_cast<uint8_t>(root_v2_viewed_from_v1->c_type()), 3);
TEST_NOTNULL(root_v2_viewed_from_v1->c());
// The field 'd' enum in version 2 has new members and should still be
// accessible, but not interpretable.
TEST_EQ(static_cast<int8_t>(root_v2_viewed_from_v1->d()), 3);
// The field 'a' in version 2 is deprecated and should return the default
// value (0) instead of the value stored in the buffer (42).
TEST_EQ(root_v2_viewed_from_v1->a(), 0);
// The field 'ff' was originally named 'f' in version 1, it should still be
// readable.
TEST_EQ(root_v2_viewed_from_v1->f()->a(), 35);
#endif
}
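// Parses both evolution schemas plus their JSON data and verifies that the
// generated `j_type` union discriminator field is marked deprecated in
// Evolution.V2.Root.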
void UnionDeprecationTest() {
const int NUM_VERSIONS = 2;
std::string schemas[NUM_VERSIONS];
std::string jsonfiles[NUM_VERSIONS];
std::vector<uint8_t> binaries[NUM_VERSIONS];
flatbuffers::IDLOptions idl_opts;
idl_opts.lang_to_generate |= flatbuffers::IDLOptions::kBinary;
flatbuffers::Parser parser(idl_opts);
// Load all the schema versions and their associated data.
for (int i = 0; i < NUM_VERSIONS; ++i) {
std::string schema = test_data_path + "evolution_test/evolution_v" +
flatbuffers::NumToString(i + 1) + ".fbs";
TEST_ASSERT(flatbuffers::LoadFile(schema.c_str(), false, &schemas[i]));
std::string json = test_data_path + "evolution_test/evolution_v" +
flatbuffers::NumToString(i + 1) + ".json";
TEST_ASSERT(flatbuffers::LoadFile(json.c_str(), false, &jsonfiles[i]));
TEST_ASSERT(parser.Parse(schemas[i].c_str()));
TEST_ASSERT(parser.Parse(jsonfiles[i].c_str()));
auto bufLen = parser.builder_.GetSize();
auto buf = parser.builder_.GetBufferPointer();
binaries[i].reserve(bufLen);
std::copy(buf, buf + bufLen, std::back_inserter(binaries[i]));
}
auto v2 = parser.LookupStruct("Evolution.V2.Root");
TEST_NOTNULL(v2);
auto j_type_field = v2->fields.Lookup("j_type");
TEST_NOTNULL(j_type_field);
TEST_ASSERT(j_type_field->deprecated);
}
void UnionVectorTest() {
// load FlatBuffer fbs schema and json.
std::string schemafile, jsonfile;
TEST_EQ(flatbuffers::LoadFile(
(test_data_path + "union_vector/union_vector.fbs").c_str(), false,
&schemafile),
true);
TEST_EQ(flatbuffers::LoadFile(
(test_data_path + "union_vector/union_vector.json").c_str(),
false, &jsonfile),
true);
// parse schema.
flatbuffers::IDLOptions idl_opts;
idl_opts.lang_to_generate |= flatbuffers::IDLOptions::kBinary;
flatbuffers::Parser parser(idl_opts);
TEST_EQ(parser.Parse(schemafile.c_str()), true);
flatbuffers::FlatBufferBuilder fbb;
// union types.
std::vector<uint8_t> types;
types.push_back(static_cast<uint8_t>(Character_Belle));
types.push_back(static_cast<uint8_t>(Character_MuLan));
types.push_back(static_cast<uint8_t>(Character_BookFan));
types.push_back(static_cast<uint8_t>(Character_Other));
types.push_back(static_cast<uint8_t>(Character_Unused));
// union values.
std::vector<flatbuffers::Offset<void>> characters;
characters.push_back(fbb.CreateStruct(BookReader(/*books_read=*/7)).Union());
characters.push_back(CreateAttacker(fbb, /*sword_attack_damage=*/5).Union());
characters.push_back(fbb.CreateStruct(BookReader(/*books_read=*/2)).Union());
characters.push_back(fbb.CreateString("Other").Union());
characters.push_back(fbb.CreateString("Unused").Union());
// create Movie.
const auto movie_offset =
CreateMovie(fbb, Character_Rapunzel,
fbb.CreateStruct(Rapunzel(/*hair_length=*/6)).Union(),
fbb.CreateVector(types), fbb.CreateVector(characters));
FinishMovieBuffer(fbb, movie_offset);
flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
TEST_EQ(VerifyMovieBuffer(verifier), true);
auto flat_movie = GetMovie(fbb.GetBufferPointer());
auto TestMovie = [](const Movie *movie) {
TEST_EQ(movie->main_character_type() == Character_Rapunzel, true);
auto cts = movie->characters_type();
TEST_EQ(movie->characters_type()->size(), 5);
TEST_EQ(cts->GetEnum<Character>(0) == Character_Belle, true);
TEST_EQ(cts->GetEnum<Character>(1) == Character_MuLan, true);
TEST_EQ(cts->GetEnum<Character>(2) == Character_BookFan, true);
TEST_EQ(cts->GetEnum<Character>(3) == Character_Other, true);
TEST_EQ(cts->GetEnum<Character>(4) == Character_Unused, true);
auto rapunzel = movie->main_character_as_Rapunzel();
TEST_NOTNULL(rapunzel);
TEST_EQ(rapunzel->hair_length(), 6);
auto cs = movie->characters();
TEST_EQ(cs->size(), 5);
auto belle = cs->GetAs<BookReader>(0);
TEST_EQ(belle->books_read(), 7);
auto mu_lan = cs->GetAs<Attacker>(1);
TEST_EQ(mu_lan->sword_attack_damage(), 5);
auto book_fan = cs->GetAs<BookReader>(2);
TEST_EQ(book_fan->books_read(), 2);
auto other = cs->GetAsString(3);
TEST_EQ_STR(other->c_str(), "Other");
auto unused = cs->GetAsString(4);
TEST_EQ_STR(unused->c_str(), "Unused");
};
TestMovie(flat_movie);
// Also test the JSON we loaded above.
TEST_EQ(parser.Parse(jsonfile.c_str()), true);
auto jbuf = parser.builder_.GetBufferPointer();
flatbuffers::Verifier jverifier(jbuf, parser.builder_.GetSize());
TEST_EQ(VerifyMovieBuffer(jverifier), true);
TestMovie(GetMovie(jbuf));
auto movie_object = flat_movie->UnPack();
TEST_EQ(movie_object->main_character.AsRapunzel()->hair_length(), 6);
TEST_EQ(movie_object->characters[0].AsBelle()->books_read(), 7);
TEST_EQ(movie_object->characters[1].AsMuLan()->sword_attack_damage, 5);
TEST_EQ(movie_object->characters[2].AsBookFan()->books_read(), 2);
TEST_EQ_STR(movie_object->characters[3].AsOther()->c_str(), "Other");
TEST_EQ_STR(movie_object->characters[4].AsUnused()->c_str(), "Unused");
fbb.Clear();
fbb.Finish(Movie::Pack(fbb, movie_object));
delete movie_object;
auto repacked_movie = GetMovie(fbb.GetBufferPointer());
TestMovie(repacked_movie);
// Generate text using mini-reflection.
auto s =
flatbuffers::FlatBufferToString(fbb.GetBufferPointer(), MovieTypeTable());
TEST_EQ_STR(
s.c_str(),
"{ main_character_type: Rapunzel, main_character: { hair_length: 6 }, "
"characters_type: [ Belle, MuLan, BookFan, Other, Unused ], "
"characters: [ { books_read: 7 }, { sword_attack_damage: 5 }, "
"{ books_read: 2 }, \"Other\", \"Unused\" ] }");
flatbuffers::ToStringVisitor visitor("\n", true, " ");
IterateFlatBuffer(fbb.GetBufferPointer(), MovieTypeTable(), &visitor);
TEST_EQ_STR(visitor.s.c_str(),
"{\n"
" \"main_character_type\": \"Rapunzel\",\n"
" \"main_character\": {\n"
" \"hair_length\": 6\n"
" },\n"
" \"characters_type\": [\n"
" \"Belle\",\n"
" \"MuLan\",\n"
" \"BookFan\",\n"
" \"Other\",\n"
" \"Unused\"\n"
" ],\n"
" \"characters\": [\n"
" {\n"
" \"books_read\": 7\n"
" },\n"
" {\n"
" \"sword_attack_damage\": 5\n"
" },\n"
" {\n"
" \"books_read\": 2\n"
" },\n"
" \"Other\",\n"
" \"Unused\"\n"
" ]\n"
"}");
// Generate text using parsed schema.
std::string jsongen;
auto result = GenerateText(parser, fbb.GetBufferPointer(), &jsongen);
TEST_EQ(result, true);
TEST_EQ_STR(jsongen.c_str(),
"{\n"
" main_character_type: \"Rapunzel\",\n"
" main_character: {\n"
" hair_length: 6\n"
" },\n"
" characters_type: [\n"
" \"Belle\",\n"
" \"MuLan\",\n"
" \"BookFan\",\n"
" \"Other\",\n"
" \"Unused\"\n"
" ],\n"
" characters: [\n"
" {\n"
" books_read: 7\n"
" },\n"
" {\n"
" sword_attack_damage: 5\n"
" },\n"
" {\n"
" books_read: 2\n"
" },\n"
" \"Other\",\n"
" \"Unused\"\n"
" ]\n"
"}\n");
// Simple test with reflection.
parser.Serialize();
auto schema = reflection::GetSchema(parser.builder_.GetBufferPointer());
auto ok = flatbuffers::Verify(*schema, *schema->root_table(),
fbb.GetBufferPointer(), fbb.GetSize());
TEST_EQ(ok, true);
flatbuffers::Parser parser2(idl_opts);
TEST_EQ(parser2.Parse("struct Bool { b:bool; }"
"union Any { Bool }"
"table Root { a:Any; }"
"root_type Root;"),
true);
TEST_EQ(parser2.Parse("{a_type:Bool,a:{b:true}}"), true);
}
void ConformTest() {
flatbuffers::Parser parser;
TEST_EQ(parser.Parse("table T { A:int; } enum E:byte { A }"), true);
auto test_conform = [](flatbuffers::Parser &parser1, const char *test,
const char *expected_err) {
flatbuffers::Parser parser2;
TEST_EQ(parser2.Parse(test), true);
auto err = parser2.ConformTo(parser1);
TEST_NOTNULL(strstr(err.c_str(), expected_err));
};
test_conform(parser, "table T { A:byte; }", "types differ for field");
test_conform(parser, "table T { B:int; A:int; }", "offsets differ for field");
test_conform(parser, "table T { A:int = 1; }", "defaults differ for field");
test_conform(parser, "table T { B:float; }",
"field renamed to different type");
test_conform(parser, "enum E:byte { B, A }", "values differ for enum");
}
void ParseProtoBufAsciiTest() {
// We can put the parser in a mode where it will accept JSON that looks more
// like Protobuf ASCII, for users that have data in that format.
// This mode uses no "" around field names (which we already support by
// default), omits `,`, omits `:` before `{`, and includes a couple of other
// differences.
flatbuffers::Parser parser;
parser.opts.protobuf_ascii_alike = true;
TEST_EQ(
parser.Parse("table S { B:int; } table T { A:[int]; C:S; } root_type T;"),
true);
TEST_EQ(parser.Parse("{ A [1 2] C { B:2 }}"), true);
// Similarly, in text output, it should omit these.
std::string text;
auto ok = flatbuffers::GenerateText(
parser, parser.builder_.GetBufferPointer(), &text);
TEST_EQ(ok, true);
TEST_EQ_STR(text.c_str(),
"{\n A [\n 1\n 2\n ]\n C {\n B: 2\n }\n}\n");
}
void FlexBuffersTest() {
flexbuffers::Builder slb(512,
flexbuffers::BUILDER_FLAG_SHARE_KEYS_AND_STRINGS);
// Write the equivalent of:
// { vec: [ -100, "Fred", 4.0, false ], bar: [ 1, 2, 3 ], bar3: [ 1, 2, 3 ],
// foo: 100, bool: true, mymap: { foo: "Fred" } }
// clang-format off
#ifndef FLATBUFFERS_CPP98_STL
// It's possible to do this without std::function support as well.
slb.Map([&]() {
slb.Vector("vec", [&]() {
slb += -100; // Equivalent to slb.Add(-100) or slb.Int(-100);
slb += "Fred";
slb.IndirectFloat(4.0f);
auto i_f = slb.LastValue();
uint8_t blob[] = { 77 };
slb.Blob(blob, 1);
slb += false;
slb.ReuseValue(i_f);
});
int ints[] = { 1, 2, 3 };
slb.Vector("bar", ints, 3);
slb.FixedTypedVector("bar3", ints, 3);
bool bools[] = {true, false, true, false};
slb.Vector("bools", bools, 4);
slb.Bool("bool", true);
slb.Double("foo", 100);
slb.Map("mymap", [&]() {
slb.String("foo", "Fred"); // Testing key and string reuse.
});
});
slb.Finish();
#else
// It's possible to do this without std::function support as well.
slb.Map([](flexbuffers::Builder& slb2) {
slb2.Vector("vec", [](flexbuffers::Builder& slb3) {
slb3 += -100; // Equivalent to slb.Add(-100) or slb.Int(-100);
slb3 += "Fred";
slb3.IndirectFloat(4.0f);
auto i_f = slb3.LastValue();
uint8_t blob[] = { 77 };
slb3.Blob(blob, 1);
slb3 += false;
slb3.ReuseValue(i_f);
}, slb2);
int ints[] = { 1, 2, 3 };
slb2.Vector("bar", ints, 3);
slb2.FixedTypedVector("bar3", ints, 3);
slb2.Bool("bool", true);
slb2.Double("foo", 100);
slb2.Map("mymap", [](flexbuffers::Builder& slb3) {
slb3.String("foo", "Fred"); // Testing key and string reuse.
}, slb2);
}, slb);
slb.Finish();
#endif // FLATBUFFERS_CPP98_STL
#ifdef FLATBUFFERS_TEST_VERBOSE
for (size_t i = 0; i < slb.GetBuffer().size(); i++)
printf("%d ", flatbuffers::vector_data(slb.GetBuffer())[i]);
printf("\n");
#endif
// clang-format on
auto map = flexbuffers::GetRoot(slb.GetBuffer()).AsMap();
TEST_EQ(map.size(), 7);
auto vec = map["vec"].AsVector();
TEST_EQ(vec.size(), 6);
TEST_EQ(vec[0].AsInt64(), -100);
TEST_EQ_STR(vec[1].AsString().c_str(), "Fred");
TEST_EQ(vec[1].AsInt64(), 0); // Number parsing failed.
TEST_EQ(vec[2].AsDouble(), 4.0);
TEST_EQ(vec[2].AsString().IsTheEmptyString(), true); // Wrong Type.
TEST_EQ_STR(vec[2].AsString().c_str(), ""); // This still works though.
TEST_EQ_STR(vec[2].ToString().c_str(), "4.0"); // Or have it converted.
// A few tests for the templated version of As.
TEST_EQ(vec[0].As<int64_t>(), -100);
TEST_EQ_STR(vec[1].As<std::string>().c_str(), "Fred");
TEST_EQ(vec[1].As<int64_t>(), 0); // Number parsing failed.
TEST_EQ(vec[2].As<double>(), 4.0);
// Test that the blob can be accessed.
TEST_EQ(vec[3].IsBlob(), true);
auto blob = vec[3].AsBlob();
TEST_EQ(blob.size(), 1);
TEST_EQ(blob.data()[0], 77);
TEST_EQ(vec[4].IsBool(), true); // Check if type is a bool
TEST_EQ(vec[4].AsBool(), false); // Check if value is false
TEST_EQ(vec[5].AsDouble(), 4.0); // This is shared with vec[2] !
auto tvec = map["bar"].AsTypedVector();
TEST_EQ(tvec.size(), 3);
TEST_EQ(tvec[2].AsInt8(), 3);
auto tvec3 = map["bar3"].AsFixedTypedVector();
TEST_EQ(tvec3.size(), 3);
TEST_EQ(tvec3[2].AsInt8(), 3);
TEST_EQ(map["bool"].AsBool(), true);
auto tvecb = map["bools"].AsTypedVector();
TEST_EQ(tvecb.ElementType(), flexbuffers::FBT_BOOL);
TEST_EQ(map["foo"].AsUInt8(), 100);
TEST_EQ(map["unknown"].IsNull(), true);
auto mymap = map["mymap"].AsMap();
// These should be equal by pointer equality, since key and value are shared.
TEST_EQ(mymap.Keys()[0].AsKey(), map.Keys()[4].AsKey());
TEST_EQ(mymap.Values()[0].AsString().c_str(), vec[1].AsString().c_str());
// We can mutate values in the buffer.
TEST_EQ(vec[0].MutateInt(-99), true);
TEST_EQ(vec[0].AsInt64(), -99);
TEST_EQ(vec[1].MutateString("John"), true); // Size must match.
TEST_EQ_STR(vec[1].AsString().c_str(), "John");
TEST_EQ(vec[1].MutateString("Alfred"), false); // Too long.
TEST_EQ(vec[2].MutateFloat(2.0f), true);
TEST_EQ(vec[2].AsFloat(), 2.0f);
TEST_EQ(vec[2].MutateFloat(3.14159), false); // Double does not fit in float.
TEST_EQ(vec[4].AsBool(), false); // Is false before change
TEST_EQ(vec[4].MutateBool(true), true); // Can change a bool
TEST_EQ(vec[4].AsBool(), true); // Changed bool is now true
// Parse from JSON:
flatbuffers::Parser parser;
slb.Clear();
auto jsontest = "{ a: [ 123, 456.0 ], b: \"hello\", c: true, d: false }";
TEST_EQ(parser.ParseFlexBuffer(jsontest, nullptr, &slb), true);
auto jroot = flexbuffers::GetRoot(slb.GetBuffer());
auto jmap = jroot.AsMap();
auto jvec = jmap["a"].AsVector();
TEST_EQ(jvec[0].AsInt64(), 123);
TEST_EQ(jvec[1].AsDouble(), 456.0);
TEST_EQ_STR(jmap["b"].AsString().c_str(), "hello");
TEST_EQ(jmap["c"].IsBool(), true); // Parsed correctly to a bool
TEST_EQ(jmap["c"].AsBool(), true); // Parsed correctly to true
TEST_EQ(jmap["d"].IsBool(), true); // Parsed correctly to a bool
TEST_EQ(jmap["d"].AsBool(), false); // Parsed correctly to false
// And from FlexBuffer back to JSON:
auto jsonback = jroot.ToString();
TEST_EQ_STR(jsontest, jsonback.c_str());
slb.Clear();
slb.Vector([&]() {
for (int i = 0; i < 130; ++i) slb.Add(static_cast<uint8_t>(255));
slb.Vector([&]() {
for (int i = 0; i < 130; ++i) slb.Add(static_cast<uint8_t>(255));
slb.Vector([] {});
});
});
slb.Finish();
TEST_EQ(slb.GetSize(), 664);
}
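// Parses NaN and the accepted infinity spellings (inf, infinity, with optional
// sign) from JSON into a FlexBuffer vector and checks the decoded doubles.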
void FlexBuffersFloatingPointTest() {
#if defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
flexbuffers::Builder slb(512,
flexbuffers::BUILDER_FLAG_SHARE_KEYS_AND_STRINGS);
// Parse floating-point values from JSON:
flatbuffers::Parser parser;
slb.Clear();
auto jsontest =
"{ a: [1.0, nan, inf, infinity, -inf, +inf, -infinity, 8.0] }";
TEST_EQ(parser.ParseFlexBuffer(jsontest, nullptr, &slb), true);
auto jroot = flexbuffers::GetRoot(slb.GetBuffer());
auto jmap = jroot.AsMap();
auto jvec = jmap["a"].AsVector();
TEST_EQ(8, jvec.size());
TEST_EQ(1.0, jvec[0].AsDouble());
TEST_ASSERT(is_quiet_nan(jvec[1].AsDouble()));
TEST_EQ(infinity_d, jvec[2].AsDouble());
TEST_EQ(infinity_d, jvec[3].AsDouble());
TEST_EQ(-infinity_d, jvec[4].AsDouble());
TEST_EQ(+infinity_d, jvec[5].AsDouble());
TEST_EQ(-infinity_d, jvec[6].AsDouble());
TEST_EQ(8.0, jvec[7].AsDouble());
#endif
}
void FlexBuffersDeprecatedTest() {
// FlexBuffers as originally designed had a flaw involving the
// FBT_VECTOR_STRING datatype, and this test documents/tests the fix for it.
// Discussion: https://github.com/google/flatbuffers/issues/5627
flexbuffers::Builder slb;
// FBT_VECTOR_* are "typed vectors" where all elements are of the same type.
// Problem is, when storing FBT_STRING elements, it relies on that type to
// get the bit-width for the size field of the string, which in this case
// isn't present, and instead defaults to 8-bit. This means that any strings
// stored inside such a vector, when accessed through the old API that returns
// a String reference, will appear to be truncated if the string stored is
// actually >=256 bytes.
std::string test_data(300, 'A');
auto start = slb.StartVector();
// This one will have a 16-bit size field.
slb.String(test_data);
// This one will have an 8-bit size field.
slb.String("hello");
// We're asking this to be serialized as a typed vector (true), but not
// fixed size (false). The type will be FBT_VECTOR_STRING with a bit-width
// of whatever the offsets in the vector need, the bit-widths of the strings
// are not stored(!) <- the actual design flaw.
// Note that even in the fixed code, we continue to serialize the elements of
// FBT_VECTOR_STRING as FBT_STRING, since there may be old code out there
// reading new data that we want to continue to function.
// Thus, FBT_VECTOR_STRING, while deprecated, will always be represented the
// same way, the fix lies on the reading side.
slb.EndVector(start, true, false);
slb.Finish();
// So now let's read this data back.
// For existing data, since we have no way of knowing what the actual
// bit-width of the size field of the string is, we are going to ignore this
// field, and instead treat these strings as FBT_KEY (null-terminated), so we
// can deal with strings of arbitrary length. This of course truncates strings
// with embedded nulls, but we think that is preferable to truncating
// strings >= 256 bytes.
auto vec = flexbuffers::GetRoot(slb.GetBuffer()).AsTypedVector();
// Even though this was serialized as FBT_VECTOR_STRING, it is read as
// FBT_VECTOR_KEY:
TEST_EQ(vec.ElementType(), flexbuffers::FBT_KEY);
// Access the long string. Previously, this would return a string of size 1,
// since it would read the high-byte of the 16-bit length.
// This should now correctly return the full 300 bytes, using AsKey():
TEST_EQ_STR(vec[0].AsKey(), test_data.c_str());
// Old code that called AsString will continue to work, as the String
// accessor objects now use a cached size that can come from a key as well.
TEST_EQ_STR(vec[0].AsString().c_str(), test_data.c_str());
// Short strings work as before:
TEST_EQ_STR(vec[1].AsKey(), "hello");
TEST_EQ_STR(vec[1].AsString().c_str(), "hello");
// So, while existing code and data mostly "just work" with the fixes applied
// to AsTypedVector and AsString, what do you do going forward?
// Code accessing existing data doesn't necessarily need to change, though
// you could consider using AsKey instead of AsString for a) documenting
// that you are accessing keys, or b) a speedup if you don't actually use
// the string size.
// For new data, or data that doesn't need to be backwards compatible,
// instead serialize as FBT_VECTOR (call EndVector with typed = false, then
// read elements with AsString), or, for maximum compactness, use
// FBT_VECTOR_KEY (call slb.Key above instead, read with AsKey or AsString).
}
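// Builds a TypeAliases table holding the extreme value of every fixed-width
// alias and checks both the stored values and the return types of the
// generated accessors.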
void TypeAliasesTest() {
flatbuffers::FlatBufferBuilder builder;
builder.Finish(CreateTypeAliases(
builder, flatbuffers::numeric_limits<int8_t>::min(),
flatbuffers::numeric_limits<uint8_t>::max(),
flatbuffers::numeric_limits<int16_t>::min(),
flatbuffers::numeric_limits<uint16_t>::max(),
flatbuffers::numeric_limits<int32_t>::min(),
flatbuffers::numeric_limits<uint32_t>::max(),
flatbuffers::numeric_limits<int64_t>::min(),
flatbuffers::numeric_limits<uint64_t>::max(), 2.3f, 2.3));
auto p = builder.GetBufferPointer();
auto ta = flatbuffers::GetRoot<TypeAliases>(p);
TEST_EQ(ta->i8(), flatbuffers::numeric_limits<int8_t>::min());
TEST_EQ(ta->u8(), flatbuffers::numeric_limits<uint8_t>::max());
TEST_EQ(ta->i16(), flatbuffers::numeric_limits<int16_t>::min());
TEST_EQ(ta->u16(), flatbuffers::numeric_limits<uint16_t>::max());
TEST_EQ(ta->i32(), flatbuffers::numeric_limits<int32_t>::min());
TEST_EQ(ta->u32(), flatbuffers::numeric_limits<uint32_t>::max());
TEST_EQ(ta->i64(), flatbuffers::numeric_limits<int64_t>::min());
TEST_EQ(ta->u64(), flatbuffers::numeric_limits<uint64_t>::max());
TEST_EQ(ta->f32(), 2.3f);
TEST_EQ(ta->f64(), 2.3);
using namespace flatbuffers; // is_same
static_assert(is_same<decltype(ta->i8()), int8_t>::value, "invalid type");
static_assert(is_same<decltype(ta->i16()), int16_t>::value, "invalid type");
static_assert(is_same<decltype(ta->i32()), int32_t>::value, "invalid type");
static_assert(is_same<decltype(ta->i64()), int64_t>::value, "invalid type");
static_assert(is_same<decltype(ta->u8()), uint8_t>::value, "invalid type");
static_assert(is_same<decltype(ta->u16()), uint16_t>::value, "invalid type");
static_assert(is_same<decltype(ta->u32()), uint32_t>::value, "invalid type");
static_assert(is_same<decltype(ta->u64()), uint64_t>::value, "invalid type");
static_assert(is_same<decltype(ta->f32()), float>::value, "invalid type");
static_assert(is_same<decltype(ta->f64()), double>::value, "invalid type");
}
void EndianSwapTest() {
TEST_EQ(flatbuffers::EndianSwap(static_cast<int16_t>(0x1234)), 0x3412);
TEST_EQ(flatbuffers::EndianSwap(static_cast<int32_t>(0x12345678)),
0x78563412);
TEST_EQ(flatbuffers::EndianSwap(static_cast<int64_t>(0x1234567890ABCDEF)),
0xEFCDAB9078563412);
TEST_EQ(flatbuffers::EndianSwap(flatbuffers::EndianSwap(3.14f)), 3.14f);
}
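// Reserves an uninitialized vector of structs, fills it in place through the
// returned pointer, and verifies the values after the Monster is finished.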
void UninitializedVectorTest() {
flatbuffers::FlatBufferBuilder builder;
Test *buf = nullptr;
auto vector_offset =
builder.CreateUninitializedVectorOfStructs<Test>(2, &buf);
TEST_NOTNULL(buf);
buf[0] = Test(10, 20);
buf[1] = Test(30, 40);
auto required_name = builder.CreateString("myMonster");
auto monster_builder = MonsterBuilder(builder);
monster_builder.add_name(
required_name); // required field mandated for monster.
monster_builder.add_test4(vector_offset);
builder.Finish(monster_builder.Finish());
auto p = builder.GetBufferPointer();
auto uvt = flatbuffers::GetRoot<Monster>(p);
TEST_NOTNULL(uvt);
auto vec = uvt->test4();
TEST_NOTNULL(vec);
auto test_0 = vec->Get(0);
auto test_1 = vec->Get(1);
TEST_EQ(test_0->a(), 10);
TEST_EQ(test_0->b(), 20);
TEST_EQ(test_1->a(), 30);
TEST_EQ(test_1->b(), 40);
}
void EqualOperatorTest() {
MonsterT a;
MonsterT b;
TEST_EQ(b == a, true);
TEST_EQ(b != a, false);
b.mana = 33;
TEST_EQ(b == a, false);
TEST_EQ(b != a, true);
b.mana = 150;
TEST_EQ(b == a, true);
TEST_EQ(b != a, false);
b.inventory.push_back(3);
TEST_EQ(b == a, false);
TEST_EQ(b != a, true);
b.inventory.clear();
TEST_EQ(b == a, true);
TEST_EQ(b != a, false);
b.test.type = Any_Monster;
TEST_EQ(b == a, false);
TEST_EQ(b != a, true);
}
// For testing any binaries, e.g. from fuzzing.
void LoadVerifyBinaryTest() {
std::string binary;
if (flatbuffers::LoadFile(
(test_data_path + "fuzzer/your-filename-here").c_str(), true,
&binary)) {
flatbuffers::Verifier verifier(
reinterpret_cast<const uint8_t *>(binary.data()), binary.size());
TEST_EQ(VerifyMonsterBuffer(verifier), true);
}
}
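// Verifies that CreateSharedString deduplicates identical strings (including
// ones with embedded nulls) while distinct strings keep distinct offsets, and
// that the shared offsets survive a round trip through a Monster table.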
void CreateSharedStringTest() {
flatbuffers::FlatBufferBuilder builder;
const auto one1 = builder.CreateSharedString("one");
const auto two = builder.CreateSharedString("two");
const auto one2 = builder.CreateSharedString("one");
TEST_EQ(one1.o, one2.o);
const auto onetwo = builder.CreateSharedString("onetwo");
TEST_EQ(onetwo.o != one1.o, true);
TEST_EQ(onetwo.o != two.o, true);
// Support for embedded nulls
const char chars_b[] = { 'a', '\0', 'b' };
const char chars_c[] = { 'a', '\0', 'c' };
const auto null_b1 = builder.CreateSharedString(chars_b, sizeof(chars_b));
const auto null_c = builder.CreateSharedString(chars_c, sizeof(chars_c));
const auto null_b2 = builder.CreateSharedString(chars_b, sizeof(chars_b));
TEST_EQ(null_b1.o != null_c.o, true); // Issue#5058 repro
TEST_EQ(null_b1.o, null_b2.o);
// Put the strings into an array for round trip verification.
const flatbuffers::Offset<flatbuffers::String> array[7] = {
one1, two, one2, onetwo, null_b1, null_c, null_b2
};
const auto vector_offset =
builder.CreateVector(array, flatbuffers::uoffset_t(7));
MonsterBuilder monster_builder(builder);
monster_builder.add_name(two);
monster_builder.add_testarrayofstring(vector_offset);
builder.Finish(monster_builder.Finish());
// Read the Monster back.
const auto *monster =
flatbuffers::GetRoot<Monster>(builder.GetBufferPointer());
TEST_EQ_STR(monster->name()->c_str(), "two");
const auto *testarrayofstring = monster->testarrayofstring();
TEST_EQ(testarrayofstring->size(), flatbuffers::uoffset_t(7));
const auto &a = *testarrayofstring;
TEST_EQ_STR(a[0]->c_str(), "one");
TEST_EQ_STR(a[1]->c_str(), "two");
TEST_EQ_STR(a[2]->c_str(), "one");
TEST_EQ_STR(a[3]->c_str(), "onetwo");
TEST_EQ(a[4]->str(), (std::string(chars_b, sizeof(chars_b))));
TEST_EQ(a[5]->str(), (std::string(chars_c, sizeof(chars_c))));
TEST_EQ(a[6]->str(), (std::string(chars_b, sizeof(chars_b))));
// Make sure String::operator< works, too, since it is related to
// StringOffsetCompare.
TEST_EQ((*a[0]) < (*a[1]), true);
TEST_EQ((*a[1]) < (*a[0]), false);
TEST_EQ((*a[1]) < (*a[2]), false);
TEST_EQ((*a[2]) < (*a[1]), true);
TEST_EQ((*a[4]) < (*a[3]), true);
TEST_EQ((*a[5]) < (*a[4]), false);
TEST_EQ((*a[5]) < (*a[4]), false);
TEST_EQ((*a[6]) < (*a[5]), true);
}
#if !defined(FLATBUFFERS_SPAN_MINIMAL)
void FlatbuffersSpanTest() {
// Compile-time checking of non-const [] to const [] conversions.
using flatbuffers::internal::is_span_convertable;
(void)is_span_convertable<int, 1, int, 1>::type(123);
(void)is_span_convertable<const int, 1, int, 1>::type(123);
(void)is_span_convertable<const int64_t, 1, int64_t, 1>::type(123);
(void)is_span_convertable<const uint64_t, 1, uint64_t, 1>::type(123);
(void)is_span_convertable<const int, 1, const int, 1>::type(123);
(void)is_span_convertable<const int64_t, 1, const int64_t, 1>::type(123);
(void)is_span_convertable<const uint64_t, 1, const uint64_t, 1>::type(123);
using flatbuffers::span;
span<char, 0> c1;
TEST_EQ(c1.size(), 0);
span<char, flatbuffers::dynamic_extent> c2;
TEST_EQ(c2.size(), 0);
span<char> c3;
TEST_EQ(c3.size(), 0);
TEST_ASSERT(c1.empty() && c2.empty() && c3.empty());
int i_data7[7] = { 0, 1, 2, 3, 4, 5, 6 };
span<int, 7> i1(&i_data7[0], 7);
span<int> i2(i1); // make dynamic from static
TEST_EQ(i1.size(), 7);
TEST_EQ(i1.empty(), false);
TEST_EQ(i1.size(), i2.size());
TEST_EQ(i1.data(), i_data7);
TEST_EQ(i1[2], 2);
// Make const span from a non-const one.
span<const int, 7> i3(i1);
// Construct from a C-array.
span<int, 7> i4(i_data7);
span<const int, 7> i5(i_data7);
span<int> i6(i_data7);
span<const int> i7(i_data7);
TEST_EQ(i7.size(), 7);
// Check construction from a const array.
const int i_cdata5[5] = { 4, 3, 2, 1, 0 };
span<const int, 5> i8(i_cdata5);
span<const int> i9(i_cdata5);
TEST_EQ(i9.size(), 5);
// Construction from a (ptr, size) pair.
span<int, 7> i10(i_data7, 7);
span<int> i11(i_data7, 7);
TEST_EQ(i11.size(), 7);
span<const int, 5> i12(i_cdata5, 5);
span<const int> i13(i_cdata5, 5);
TEST_EQ(i13.size(), 5);
// Construction from std::array.
std::array<int, 6> i_arr6 = { { 0, 1, 2, 3, 4, 5 } };
span<int, 6> i14(i_arr6);
span<const int, 6> i15(i_arr6);
span<int> i16(i_arr6);
span<const int> i17(i_arr6);
TEST_EQ(i17.size(), 6);
const std::array<int, 8> i_carr8 = { { 0, 1, 2, 3, 4, 5, 6, 7 } };
span<const int, 8> i18(i_carr8);
span<const int> i19(i_carr8);
TEST_EQ(i18.size(), 8);
TEST_EQ(i19.size(), 8);
TEST_EQ(i19[7], 7);
// Check compatibility with flatbuffers::Array.
int fbs_int3_underlaying[3] = { 0 };
int fbs_int3_data[3] = { 1, 2, 3 };
auto &fbs_int3 = flatbuffers::CastToArray(fbs_int3_underlaying);
fbs_int3.CopyFromSpan(fbs_int3_data);
TEST_EQ(fbs_int3.Get(1), 2);
const int fbs_cint3_data[3] = { 2, 3, 4 };
fbs_int3.CopyFromSpan(fbs_cint3_data);
TEST_EQ(fbs_int3.Get(1), 3);
// Check with Array<Enum, N>
enum class Dummy : uint16_t { Zero = 0, One, Two };
Dummy fbs_dummy3_underlaying[3] = {};
Dummy fbs_dummy3_data[3] = { Dummy::One, Dummy::Two, Dummy::Two };
auto &fbs_dummy3 = flatbuffers::CastToArray(fbs_dummy3_underlaying);
fbs_dummy3.CopyFromSpan(fbs_dummy3_data);
TEST_EQ(fbs_dummy3.Get(1), Dummy::Two);
}
#else
void FlatbuffersSpanTest() {}
#endif
void FixedLengthArrayTest() {
// VS10 does not support typed enums, exclude from tests
#if !defined(_MSC_VER) || _MSC_VER >= 1700
// Generate an ArrayTable containing one ArrayStruct.
flatbuffers::FlatBufferBuilder fbb;
MyGame::Example::NestedStruct nStruct0(MyGame::Example::TestEnum::B);
TEST_NOTNULL(nStruct0.mutable_a());
nStruct0.mutable_a()->Mutate(0, 1);
nStruct0.mutable_a()->Mutate(1, 2);
TEST_NOTNULL(nStruct0.mutable_c());
nStruct0.mutable_c()->Mutate(0, MyGame::Example::TestEnum::C);
nStruct0.mutable_c()->Mutate(1, MyGame::Example::TestEnum::A);
TEST_NOTNULL(nStruct0.mutable_d());
nStruct0.mutable_d()->Mutate(0, flatbuffers::numeric_limits<int64_t>::max());
nStruct0.mutable_d()->Mutate(1, flatbuffers::numeric_limits<int64_t>::min());
MyGame::Example::NestedStruct nStruct1(MyGame::Example::TestEnum::C);
TEST_NOTNULL(nStruct1.mutable_a());
nStruct1.mutable_a()->Mutate(0, 3);
nStruct1.mutable_a()->Mutate(1, 4);
TEST_NOTNULL(nStruct1.mutable_c());
nStruct1.mutable_c()->Mutate(0, MyGame::Example::TestEnum::C);
nStruct1.mutable_c()->Mutate(1, MyGame::Example::TestEnum::A);
TEST_NOTNULL(nStruct1.mutable_d());
nStruct1.mutable_d()->Mutate(0, flatbuffers::numeric_limits<int64_t>::min());
nStruct1.mutable_d()->Mutate(1, flatbuffers::numeric_limits<int64_t>::max());
MyGame::Example::ArrayStruct aStruct(2, 12, 1);
TEST_NOTNULL(aStruct.b());
TEST_NOTNULL(aStruct.mutable_b());
TEST_NOTNULL(aStruct.mutable_d());
TEST_NOTNULL(aStruct.mutable_f());
for (int i = 0; i < aStruct.b()->size(); i++)
aStruct.mutable_b()->Mutate(i, i + 1);
aStruct.mutable_d()->Mutate(0, nStruct0);
aStruct.mutable_d()->Mutate(1, nStruct1);
auto aTable = MyGame::Example::CreateArrayTable(fbb, &aStruct);
MyGame::Example::FinishArrayTableBuffer(fbb, aTable);
// Verify correctness of the ArrayTable.
flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
MyGame::Example::VerifyArrayTableBuffer(verifier);
auto p = MyGame::Example::GetMutableArrayTable(fbb.GetBufferPointer());
auto mArStruct = p->mutable_a();
TEST_NOTNULL(mArStruct);
TEST_NOTNULL(mArStruct->b());
TEST_NOTNULL(mArStruct->d());
TEST_NOTNULL(mArStruct->f());
TEST_NOTNULL(mArStruct->mutable_b());
TEST_NOTNULL(mArStruct->mutable_d());
TEST_NOTNULL(mArStruct->mutable_f());
mArStruct->mutable_b()->Mutate(14, -14);
TEST_EQ(mArStruct->a(), 2);
TEST_EQ(mArStruct->b()->size(), 15);
TEST_EQ(mArStruct->b()->Get(aStruct.b()->size() - 1), -14);
TEST_EQ(mArStruct->c(), 12);
TEST_NOTNULL(mArStruct->d()->Get(0));
TEST_NOTNULL(mArStruct->d()->Get(0)->a());
TEST_EQ(mArStruct->d()->Get(0)->a()->Get(0), 1);
TEST_EQ(mArStruct->d()->Get(0)->a()->Get(1), 2);
TEST_NOTNULL(mArStruct->d()->Get(1));
TEST_NOTNULL(mArStruct->d()->Get(1)->a());
TEST_EQ(mArStruct->d()->Get(1)->a()->Get(0), 3);
TEST_EQ(mArStruct->d()->Get(1)->a()->Get(1), 4);
TEST_NOTNULL(mArStruct->mutable_d()->GetMutablePointer(1));
TEST_NOTNULL(mArStruct->mutable_d()->GetMutablePointer(1)->mutable_a());
mArStruct->mutable_d()->GetMutablePointer(1)->mutable_a()->Mutate(1, 5);
TEST_EQ(5, mArStruct->d()->Get(1)->a()->Get(1));
TEST_EQ(MyGame::Example::TestEnum::B, mArStruct->d()->Get(0)->b());
TEST_NOTNULL(mArStruct->d()->Get(0)->c());
TEST_EQ(MyGame::Example::TestEnum::C, mArStruct->d()->Get(0)->c()->Get(0));
TEST_EQ(MyGame::Example::TestEnum::A, mArStruct->d()->Get(0)->c()->Get(1));
TEST_EQ(flatbuffers::numeric_limits<int64_t>::max(),
mArStruct->d()->Get(0)->d()->Get(0));
TEST_EQ(flatbuffers::numeric_limits<int64_t>::min(),
mArStruct->d()->Get(0)->d()->Get(1));
TEST_EQ(MyGame::Example::TestEnum::C, mArStruct->d()->Get(1)->b());
TEST_NOTNULL(mArStruct->d()->Get(1)->c());
TEST_EQ(MyGame::Example::TestEnum::C, mArStruct->d()->Get(1)->c()->Get(0));
TEST_EQ(MyGame::Example::TestEnum::A, mArStruct->d()->Get(1)->c()->Get(1));
TEST_EQ(flatbuffers::numeric_limits<int64_t>::min(),
mArStruct->d()->Get(1)->d()->Get(0));
TEST_EQ(flatbuffers::numeric_limits<int64_t>::max(),
mArStruct->d()->Get(1)->d()->Get(1));
for (int i = 0; i < mArStruct->b()->size() - 1; i++)
TEST_EQ(mArStruct->b()->Get(i), i + 1);
// Check alignment
TEST_EQ(0, reinterpret_cast<uintptr_t>(mArStruct->d()) % 8);
TEST_EQ(0, reinterpret_cast<uintptr_t>(mArStruct->f()) % 8);
  // Check that the default constructor sets all memory to zero
const size_t arr_size = sizeof(MyGame::Example::ArrayStruct);
char non_zero_memory[arr_size];
// set memory chunk of size ArrayStruct to 1's
std::memset(static_cast<void *>(non_zero_memory), 1, arr_size);
// after placement-new it should be all 0's
# if defined(_MSC_VER) && defined(_DEBUG)
# undef new
# endif
MyGame::Example::ArrayStruct *ap =
new (non_zero_memory) MyGame::Example::ArrayStruct;
# if defined(_MSC_VER) && defined(_DEBUG)
# define new DEBUG_NEW
# endif
(void)ap;
for (size_t i = 0; i < arr_size; ++i) { TEST_EQ(non_zero_memory[i], 0); }
#endif
}
#if !defined(FLATBUFFERS_SPAN_MINIMAL) && \
(!defined(_MSC_VER) || _MSC_VER >= 1700)
void FixedLengthArrayConstructorTest() {
const int32_t nested_a[2] = { 1, 2 };
MyGame::Example::TestEnum nested_c[2] = { MyGame::Example::TestEnum::A,
MyGame::Example::TestEnum::B };
const int64_t int64_2[2] = { -2, -1 };
std::array<MyGame::Example::NestedStruct, 2> init_d = {
{ MyGame::Example::NestedStruct(nested_a, MyGame::Example::TestEnum::B,
nested_c, int64_2),
MyGame::Example::NestedStruct(nested_a, MyGame::Example::TestEnum::A,
nested_c,
std::array<int64_t, 2>{ { 12, 13 } }) }
};
MyGame::Example::ArrayStruct arr_struct(
8.125,
std::array<int32_t, 0xF>{
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } },
-17, init_d, 10, int64_2);
TEST_EQ(arr_struct.a(), 8.125);
TEST_EQ(arr_struct.b()->Get(2), 3);
TEST_EQ(arr_struct.c(), -17);
TEST_NOTNULL(arr_struct.d());
const auto &arr_d_0 = *arr_struct.d()->Get(0);
TEST_EQ(arr_d_0.a()->Get(0), 1);
TEST_EQ(arr_d_0.a()->Get(1), 2);
TEST_EQ(arr_d_0.b(), MyGame::Example::TestEnum::B);
TEST_EQ(arr_d_0.c()->Get(0), MyGame::Example::TestEnum::A);
TEST_EQ(arr_d_0.c()->Get(1), MyGame::Example::TestEnum::B);
TEST_EQ(arr_d_0.d()->Get(0), -2);
TEST_EQ(arr_d_0.d()->Get(1), -1);
const auto &arr_d_1 = *arr_struct.d()->Get(1);
TEST_EQ(arr_d_1.a()->Get(0), 1);
TEST_EQ(arr_d_1.a()->Get(1), 2);
TEST_EQ(arr_d_1.b(), MyGame::Example::TestEnum::A);
TEST_EQ(arr_d_1.c()->Get(0), MyGame::Example::TestEnum::A);
TEST_EQ(arr_d_1.c()->Get(1), MyGame::Example::TestEnum::B);
TEST_EQ(arr_d_1.d()->Get(0), 12);
TEST_EQ(arr_d_1.d()->Get(1), 13);
TEST_EQ(arr_struct.e(), 10);
TEST_EQ(arr_struct.f()->Get(0), -2);
TEST_EQ(arr_struct.f()->Get(1), -1);
}
#else
void FixedLengthArrayConstructorTest() {}
#endif
void NativeTypeTest() {
const int N = 3;
Geometry::ApplicationDataT src_data;
src_data.vectors.reserve(N);
src_data.vectors_alt.reserve(N);
for (int i = 0; i < N; ++i) {
src_data.vectors.push_back(
Native::Vector3D(10 * i + 0.1f, 10 * i + 0.2f, 10 * i + 0.3f));
src_data.vectors_alt.push_back(
Native::Vector3D(20 * i + 0.1f, 20 * i + 0.2f, 20 * i + 0.3f));
}
flatbuffers::FlatBufferBuilder fbb;
fbb.Finish(Geometry::ApplicationData::Pack(fbb, &src_data));
auto dstDataT = Geometry::UnPackApplicationData(fbb.GetBufferPointer());
for (int i = 0; i < N; ++i) {
const Native::Vector3D &v = dstDataT->vectors[i];
TEST_EQ(v.x, 10 * i + 0.1f);
TEST_EQ(v.y, 10 * i + 0.2f);
TEST_EQ(v.z, 10 * i + 0.3f);
const Native::Vector3D &v2 = dstDataT->vectors_alt[i];
TEST_EQ(v2.x, 20 * i + 0.1f);
TEST_EQ(v2.y, 20 * i + 0.2f);
TEST_EQ(v2.z, 20 * i + 0.3f);
}
}
void FixedLengthArrayJsonTest(bool binary) {
// VS10 does not support typed enums, exclude from tests
#if !defined(_MSC_VER) || _MSC_VER >= 1700
// load FlatBuffer schema (.fbs) and JSON from disk
std::string schemafile;
std::string jsonfile;
TEST_EQ(
flatbuffers::LoadFile(
(test_data_path + "arrays_test." + (binary ? "bfbs" : "fbs")).c_str(),
binary, &schemafile),
true);
TEST_EQ(flatbuffers::LoadFile((test_data_path + "arrays_test.golden").c_str(),
false, &jsonfile),
true);
// parse schema first, so we can use it to parse the data after
flatbuffers::Parser parserOrg, parserGen;
if (binary) {
flatbuffers::Verifier verifier(
reinterpret_cast<const uint8_t *>(schemafile.c_str()),
schemafile.size());
TEST_EQ(reflection::VerifySchemaBuffer(verifier), true);
TEST_EQ(parserOrg.Deserialize((const uint8_t *)schemafile.c_str(),
schemafile.size()),
true);
TEST_EQ(parserGen.Deserialize((const uint8_t *)schemafile.c_str(),
schemafile.size()),
true);
} else {
TEST_EQ(parserOrg.Parse(schemafile.c_str()), true);
TEST_EQ(parserGen.Parse(schemafile.c_str()), true);
}
TEST_EQ(parserOrg.Parse(jsonfile.c_str()), true);
// First, verify it, just in case:
flatbuffers::Verifier verifierOrg(parserOrg.builder_.GetBufferPointer(),
parserOrg.builder_.GetSize());
TEST_EQ(VerifyArrayTableBuffer(verifierOrg), true);
// Export to JSON
std::string jsonGen;
TEST_EQ(
GenerateText(parserOrg, parserOrg.builder_.GetBufferPointer(), &jsonGen),
true);
// Import from JSON
TEST_EQ(parserGen.Parse(jsonGen.c_str()), true);
// Verify buffer from generated JSON
flatbuffers::Verifier verifierGen(parserGen.builder_.GetBufferPointer(),
parserGen.builder_.GetSize());
TEST_EQ(VerifyArrayTableBuffer(verifierGen), true);
// Compare generated buffer to original
TEST_EQ(parserOrg.builder_.GetSize(), parserGen.builder_.GetSize());
TEST_EQ(std::memcmp(parserOrg.builder_.GetBufferPointer(),
parserGen.builder_.GetBufferPointer(),
parserOrg.builder_.GetSize()),
0);
#else
(void)binary;
#endif
}
void TestEmbeddedBinarySchema() {
// load JSON from disk
std::string jsonfile;
TEST_EQ(flatbuffers::LoadFile(
(test_data_path + "monsterdata_test.golden").c_str(), false,
&jsonfile),
true);
// parse schema first, so we can use it to parse the data after
flatbuffers::Parser parserOrg, parserGen;
flatbuffers::Verifier verifier(MyGame::Example::MonsterBinarySchema::data(),
MyGame::Example::MonsterBinarySchema::size());
TEST_EQ(reflection::VerifySchemaBuffer(verifier), true);
TEST_EQ(parserOrg.Deserialize(MyGame::Example::MonsterBinarySchema::data(),
MyGame::Example::MonsterBinarySchema::size()),
true);
TEST_EQ(parserGen.Deserialize(MyGame::Example::MonsterBinarySchema::data(),
MyGame::Example::MonsterBinarySchema::size()),
true);
TEST_EQ(parserOrg.Parse(jsonfile.c_str()), true);
// First, verify it, just in case:
flatbuffers::Verifier verifierOrg(parserOrg.builder_.GetBufferPointer(),
parserOrg.builder_.GetSize());
TEST_EQ(VerifyMonsterBuffer(verifierOrg), true);
// Export to JSON
std::string jsonGen;
TEST_EQ(
GenerateText(parserOrg, parserOrg.builder_.GetBufferPointer(), &jsonGen),
true);
// Import from JSON
TEST_EQ(parserGen.Parse(jsonGen.c_str()), true);
// Verify buffer from generated JSON
flatbuffers::Verifier verifierGen(parserGen.builder_.GetBufferPointer(),
parserGen.builder_.GetSize());
TEST_EQ(VerifyMonsterBuffer(verifierGen), true);
// Compare generated buffer to original
TEST_EQ(parserOrg.builder_.GetSize(), parserGen.builder_.GetSize());
TEST_EQ(std::memcmp(parserOrg.builder_.GetBufferPointer(),
parserGen.builder_.GetBufferPointer(),
parserOrg.builder_.GetSize()),
0);
}
void StringVectorDefaultsTest() {
std::vector<std::string> schemas;
schemas.push_back("table Monster { mana: string = \"\"; }");
schemas.push_back("table Monster { mana: string = \"mystr\"; }");
schemas.push_back("table Monster { mana: string = \" \"; }");
schemas.push_back("table Monster { mana: [int] = []; }");
schemas.push_back("table Monster { mana: [uint] = [ ]; }");
schemas.push_back("table Monster { mana: [byte] = [\t\t\n]; }");
schemas.push_back("enum E:int{}table Monster{mana:[E]=[];}");
for (auto s = schemas.begin(); s < schemas.end(); s++) {
flatbuffers::Parser parser;
TEST_ASSERT(parser.Parse(s->c_str()));
const auto *mana = parser.structs_.Lookup("Monster")->fields.Lookup("mana");
TEST_EQ(mana->IsDefault(), true);
}
}
void OptionalScalarsTest() {
  // Simple schemas and a "has optional scalar" sentinel.
std::vector<std::string> schemas;
schemas.push_back("table Monster { mana : int; }");
schemas.push_back("table Monster { mana : int = 42; }");
schemas.push_back("table Monster { mana : int = null; }");
schemas.push_back("table Monster { mana : long; }");
schemas.push_back("table Monster { mana : long = 42; }");
schemas.push_back("table Monster { mana : long = null; }");
schemas.push_back("table Monster { mana : float; }");
schemas.push_back("table Monster { mana : float = 42; }");
schemas.push_back("table Monster { mana : float = null; }");
schemas.push_back("table Monster { mana : double; }");
schemas.push_back("table Monster { mana : double = 42; }");
schemas.push_back("table Monster { mana : double = null; }");
schemas.push_back("table Monster { mana : bool; }");
schemas.push_back("table Monster { mana : bool = 42; }");
schemas.push_back("table Monster { mana : bool = null; }");
schemas.push_back(
"enum Enum: int {A=0, B=1} "
"table Monster { mana : Enum; }");
schemas.push_back(
"enum Enum: int {A=0, B=1} "
"table Monster { mana : Enum = B; }");
schemas.push_back(
"enum Enum: int {A=0, B=1} "
"table Monster { mana : Enum = null; }");
// Check the FieldDef is correctly set.
for (auto schema = schemas.begin(); schema < schemas.end(); schema++) {
const bool has_null = schema->find("null") != std::string::npos;
flatbuffers::Parser parser;
TEST_ASSERT(parser.Parse(schema->c_str()));
const auto *mana = parser.structs_.Lookup("Monster")->fields.Lookup("mana");
TEST_EQ(mana->IsOptional(), has_null);
}
// Test if nullable scalars are allowed for each language.
for (unsigned lang = 1; lang < flatbuffers::IDLOptions::kMAX; lang <<= 1) {
flatbuffers::IDLOptions opts;
opts.lang_to_generate = lang;
if (false == flatbuffers::Parser::SupportsOptionalScalars(opts)) {
continue;
}
for (auto schema = schemas.begin(); schema < schemas.end(); schema++) {
flatbuffers::Parser parser(opts);
auto done = parser.Parse(schema->c_str());
TEST_EQ_STR(parser.error_.c_str(), "");
TEST_ASSERT(done);
}
}
// test C++ nullable
flatbuffers::FlatBufferBuilder fbb;
FinishScalarStuffBuffer(
fbb, optional_scalars::CreateScalarStuff(fbb, 1, static_cast<int8_t>(2)));
auto opts = optional_scalars::GetMutableScalarStuff(fbb.GetBufferPointer());
TEST_ASSERT(!opts->maybe_bool());
TEST_ASSERT(!opts->maybe_f32().has_value());
TEST_ASSERT(opts->maybe_i8().has_value());
TEST_EQ(opts->maybe_i8().value(), 2);
TEST_ASSERT(opts->mutate_maybe_i8(3));
TEST_ASSERT(opts->maybe_i8().has_value());
TEST_EQ(opts->maybe_i8().value(), 3);
TEST_ASSERT(!opts->mutate_maybe_i16(-10));
optional_scalars::ScalarStuffT obj;
TEST_ASSERT(!obj.maybe_bool);
TEST_ASSERT(!obj.maybe_f32.has_value());
opts->UnPackTo(&obj);
TEST_ASSERT(!obj.maybe_bool);
TEST_ASSERT(!obj.maybe_f32.has_value());
TEST_ASSERT(obj.maybe_i8.has_value() && obj.maybe_i8.value() == 3);
TEST_ASSERT(obj.maybe_i8 && *obj.maybe_i8 == 3);
obj.maybe_i32 = -1;
obj.maybe_enum = optional_scalars::OptionalByte_Two;
fbb.Clear();
FinishScalarStuffBuffer(fbb, optional_scalars::ScalarStuff::Pack(fbb, &obj));
opts = optional_scalars::GetMutableScalarStuff(fbb.GetBufferPointer());
TEST_ASSERT(opts->maybe_i8().has_value());
TEST_EQ(opts->maybe_i8().value(), 3);
TEST_ASSERT(opts->maybe_i32().has_value());
TEST_EQ(opts->maybe_i32().value(), -1);
TEST_EQ(opts->maybe_enum().value(), optional_scalars::OptionalByte_Two);
TEST_ASSERT(opts->maybe_i32() == flatbuffers::Optional<int64_t>(-1));
}
void ParseFlexbuffersFromJsonWithNullTest() {
// Test nulls are handled appropriately through flexbuffers to exercise other
// code paths of ParseSingleValue in the optional scalars change.
// TODO(cneo): Json -> Flatbuffers test once some language can generate code
// with optional scalars.
{
char json[] = "{\"opt_field\": 123 }";
flatbuffers::Parser parser;
flexbuffers::Builder flexbuild;
parser.ParseFlexBuffer(json, nullptr, &flexbuild);
auto root = flexbuffers::GetRoot(flexbuild.GetBuffer());
TEST_EQ(root.AsMap()["opt_field"].AsInt64(), 123);
}
{
char json[] = "{\"opt_field\": 123.4 }";
flatbuffers::Parser parser;
flexbuffers::Builder flexbuild;
parser.ParseFlexBuffer(json, nullptr, &flexbuild);
auto root = flexbuffers::GetRoot(flexbuild.GetBuffer());
TEST_EQ(root.AsMap()["opt_field"].AsDouble(), 123.4);
}
{
char json[] = "{\"opt_field\": null }";
flatbuffers::Parser parser;
flexbuffers::Builder flexbuild;
parser.ParseFlexBuffer(json, nullptr, &flexbuild);
auto root = flexbuffers::GetRoot(flexbuild.GetBuffer());
TEST_ASSERT(!root.AsMap().IsTheEmptyMap());
TEST_ASSERT(root.AsMap()["opt_field"].IsNull());
TEST_EQ(root.ToString(), std::string("{ opt_field: null }"));
}
}
void FieldIdentifierTest() {
using flatbuffers::Parser;
TEST_EQ(true, Parser().Parse("table T{ f: int (id:0); }"));
// non-integer `id` should be rejected
TEST_EQ(false, Parser().Parse("table T{ f: int (id:text); }"));
TEST_EQ(false, Parser().Parse("table T{ f: int (id:\"text\"); }"));
TEST_EQ(false, Parser().Parse("table T{ f: int (id:0text); }"));
TEST_EQ(false, Parser().Parse("table T{ f: int (id:1.0); }"));
TEST_EQ(false, Parser().Parse("table T{ f: int (id:-1); g: int (id:0); }"));
TEST_EQ(false, Parser().Parse("table T{ f: int (id:129496726); }"));
  // A union field occupies two ids: enumerator + pointer (offset).
TEST_EQ(false,
Parser().Parse("union X{} table T{ u: X(id:0); table F{x:int;\n}"));
// Positive tests for unions
TEST_EQ(true, Parser().Parse("union X{} table T{ u: X (id:1); }"));
TEST_EQ(true, Parser().Parse("union X{} table T{ u: X; }"));
// Test using 'inf' and 'nan' words both as identifiers and as default values.
TEST_EQ(true, Parser().Parse("table T{ nan: string; }"));
TEST_EQ(true, Parser().Parse("table T{ inf: string; }"));
#if defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
TEST_EQ(true, Parser().Parse("table T{ inf: float = inf; }"));
TEST_EQ(true, Parser().Parse("table T{ nan: float = inf; }"));
#endif
}
void ParseIncorrectMonsterJsonTest() {
std::string schemafile;
TEST_EQ(flatbuffers::LoadFile((test_data_path + "monster_test.bfbs").c_str(),
true, &schemafile),
true);
flatbuffers::Parser parser;
flatbuffers::Verifier verifier(
reinterpret_cast<const uint8_t *>(schemafile.c_str()), schemafile.size());
TEST_EQ(reflection::VerifySchemaBuffer(verifier), true);
TEST_EQ(parser.Deserialize((const uint8_t *)schemafile.c_str(),
schemafile.size()),
true);
TEST_EQ(parser.ParseJson("{name:\"monster\"}"), true);
TEST_EQ(parser.ParseJson(""), false);
TEST_EQ(parser.ParseJson("{name: 1}"), false);
TEST_EQ(parser.ParseJson("{name:+1}"), false);
TEST_EQ(parser.ParseJson("{name:-1}"), false);
TEST_EQ(parser.ParseJson("{name:-f}"), false);
TEST_EQ(parser.ParseJson("{name:+f}"), false);
}
#if !defined(_MSC_VER) || _MSC_VER >= 1700
template<class T, class Container>
void TestIterators(const std::vector<T> &expected, const Container &tested) {
TEST_ASSERT(tested.rbegin().base() == tested.end());
TEST_ASSERT(tested.crbegin().base() == tested.cend());
TEST_ASSERT(tested.rend().base() == tested.begin());
TEST_ASSERT(tested.crend().base() == tested.cbegin());
size_t k = 0;
for (auto it = tested.begin(); it != tested.end(); ++it, ++k) {
const auto &e = expected.at(k);
TEST_EQ(*it, e);
}
TEST_EQ(k, expected.size());
k = expected.size();
for (auto it = tested.rbegin(); it != tested.rend(); ++it, --k) {
const auto &e = expected.at(k - 1);
TEST_EQ(*it, e);
}
TEST_EQ(k, 0);
}
void FlatbuffersIteratorsTest() {
{
flatbuffers::FlatBufferBuilder fbb;
const std::vector<unsigned char> inv_data = { 1, 2, 3 };
{
auto mon_name = fbb.CreateString("MyMonster"); // key, mandatory
auto inv_vec = fbb.CreateVector(inv_data);
auto empty_i64_vec =
fbb.CreateVector(static_cast<const int64_t *>(nullptr), 0);
MonsterBuilder mb(fbb);
mb.add_name(mon_name);
mb.add_inventory(inv_vec);
mb.add_vector_of_longs(empty_i64_vec);
FinishMonsterBuffer(fbb, mb.Finish());
}
const auto &mon = *flatbuffers::GetRoot<Monster>(fbb.GetBufferPointer());
TEST_EQ_STR("MyMonster", mon.name()->c_str());
TEST_ASSERT(mon.inventory());
TEST_ASSERT(mon.vector_of_longs());
TestIterators(inv_data, *mon.inventory());
TestIterators(std::vector<int64_t>(), *mon.vector_of_longs());
}
{
flatbuffers::FlatBufferBuilder fbb;
MyGame::Example::ArrayStruct aStruct;
MyGame::Example::FinishArrayTableBuffer(
fbb, MyGame::Example::CreateArrayTable(fbb, &aStruct));
const auto &array_table =
*flatbuffers::GetRoot<ArrayTable>(fbb.GetBufferPointer());
TEST_ASSERT(array_table.a());
auto &int_15 = *array_table.a()->b();
TestIterators(std::vector<int>(15, 0), int_15);
}
}
#else
void FlatbuffersIteratorsTest() {}
#endif
int FlatBufferTests() {
// clang-format off
// Run our various test suites:
std::string rawbuf;
auto flatbuf1 = CreateFlatBufferTest(rawbuf);
#if !defined(FLATBUFFERS_CPP98_STL)
auto flatbuf = std::move(flatbuf1); // Test move assignment.
#else
auto &flatbuf = flatbuf1;
#endif // !defined(FLATBUFFERS_CPP98_STL)
TriviallyCopyableTest();
AccessFlatBufferTest(reinterpret_cast<const uint8_t *>(rawbuf.c_str()),
rawbuf.length());
AccessFlatBufferTest(flatbuf.data(), flatbuf.size());
MutateFlatBuffersTest(flatbuf.data(), flatbuf.size());
ObjectFlatBuffersTest(flatbuf.data());
MiniReflectFlatBuffersTest(flatbuf.data());
MiniReflectFixedLengthArrayTest();
SizePrefixedTest();
#ifndef FLATBUFFERS_NO_FILE_TESTS
#ifdef FLATBUFFERS_TEST_PATH_PREFIX
test_data_path = FLATBUFFERS_STRING(FLATBUFFERS_TEST_PATH_PREFIX) +
test_data_path;
#endif
ParseAndGenerateTextTest(false);
ParseAndGenerateTextTest(true);
FixedLengthArrayJsonTest(false);
FixedLengthArrayJsonTest(true);
ReflectionTest(flatbuf.data(), flatbuf.size());
ParseProtoTest();
ParseProtoTestWithSuffix();
ParseProtoTestWithIncludes();
EvolutionTest();
UnionDeprecationTest();
UnionVectorTest();
LoadVerifyBinaryTest();
GenerateTableTextTest();
TestEmbeddedBinarySchema();
#endif
// clang-format on
FuzzTest1();
FuzzTest2();
ErrorTest();
ValueTest();
EnumValueTest();
EnumStringsTest();
EnumNamesTest();
EnumOutOfRangeTest();
IntegerOutOfRangeTest();
IntegerBoundaryTest();
UnicodeTest();
UnicodeTestAllowNonUTF8();
UnicodeTestGenerateTextFailsOnNonUTF8();
UnicodeSurrogatesTest();
UnicodeInvalidSurrogatesTest();
InvalidUTF8Test();
UnknownFieldsTest();
ParseUnionTest();
InvalidNestedFlatbufferTest();
ConformTest();
ParseProtoBufAsciiTest();
TypeAliasesTest();
EndianSwapTest();
CreateSharedStringTest();
JsonDefaultTest();
JsonEnumsTest();
FlexBuffersTest();
FlexBuffersDeprecatedTest();
UninitializedVectorTest();
EqualOperatorTest();
NumericUtilsTest();
IsAsciiUtilsTest();
ValidFloatTest();
InvalidFloatTest();
TestMonsterExtraFloats();
FixedLengthArrayTest();
NativeTypeTest();
OptionalScalarsTest();
ParseFlexbuffersFromJsonWithNullTest();
FlatbuffersSpanTest();
FixedLengthArrayConstructorTest();
FieldIdentifierTest();
StringVectorDefaultsTest();
ParseIncorrectMonsterJsonTest();
FlexBuffersFloatingPointTest();
FlatbuffersIteratorsTest();
return 0;
}
int main(int /*argc*/, const char * /*argv*/[]) {
InitTestEngine();
std::string req_locale;
if (flatbuffers::ReadEnvironmentVariable("FLATBUFFERS_TEST_LOCALE",
&req_locale)) {
TEST_OUTPUT_LINE("The environment variable FLATBUFFERS_TEST_LOCALE=%s",
req_locale.c_str());
req_locale = flatbuffers::RemoveStringQuotes(req_locale);
std::string the_locale;
TEST_ASSERT_FUNC(
flatbuffers::SetGlobalTestLocale(req_locale.c_str(), &the_locale));
TEST_OUTPUT_LINE("The global C-locale changed: %s", the_locale.c_str());
}
FlatBufferTests();
FlatBufferBuilderTest();
if (!testing_fails) {
TEST_OUTPUT_LINE("ALL TESTS PASSED");
} else {
TEST_OUTPUT_LINE("%d FAILED TESTS", testing_fails);
}
return CloseTestEngine();
}
| 1 | 20,934 | This seems like an incomplete schema definition. will that have any effect on the tests? | google-flatbuffers | java |
@@ -22,7 +22,10 @@ DatasetLoader::DatasetLoader(const Config& io_config, const PredictFunction& pre
label_idx_ = 0;
weight_idx_ = NO_SPECIFIC;
group_idx_ = NO_SPECIFIC;
- SetHeader(filename);
+ if (filename != nullptr && CheckCanLoadFromBin(filename) == "") {
+ // SetHeader should only be called when loading from text file
+ SetHeader(filename);
+ }
store_raw_ = false;
if (io_config.linear_tree) {
store_raw_ = true; | 1 | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#include <LightGBM/dataset_loader.h>
#include <LightGBM/network.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <chrono>
#include <fstream>
namespace LightGBM {
using json11::Json;
DatasetLoader::DatasetLoader(const Config& io_config, const PredictFunction& predict_fun, int num_class, const char* filename)
:config_(io_config), random_(config_.data_random_seed), predict_fun_(predict_fun), num_class_(num_class) {
label_idx_ = 0;
weight_idx_ = NO_SPECIFIC;
group_idx_ = NO_SPECIFIC;
SetHeader(filename);
store_raw_ = false;
if (io_config.linear_tree) {
store_raw_ = true;
}
}
DatasetLoader::~DatasetLoader() {
}
void DatasetLoader::SetHeader(const char* filename) {
std::unordered_map<std::string, int> name2idx;
std::string name_prefix("name:");
if (filename != nullptr) {
TextReader<data_size_t> text_reader(filename, config_.header);
// get column names
if (config_.header) {
std::string first_line = text_reader.first_line();
feature_names_ = Common::Split(first_line.c_str(), "\t,");
}
// load label idx first
if (config_.label_column.size() > 0) {
if (Common::StartsWith(config_.label_column, name_prefix)) {
std::string name = config_.label_column.substr(name_prefix.size());
label_idx_ = -1;
for (int i = 0; i < static_cast<int>(feature_names_.size()); ++i) {
if (name == feature_names_[i]) {
label_idx_ = i;
break;
}
}
if (label_idx_ >= 0) {
Log::Info("Using column %s as label", name.c_str());
} else {
Log::Fatal("Could not find label column %s in data file \n"
"or data file doesn't contain header", name.c_str());
}
} else {
if (!Common::AtoiAndCheck(config_.label_column.c_str(), &label_idx_)) {
Log::Fatal("label_column is not a number,\n"
"if you want to use a column name,\n"
"please add the prefix \"name:\" to the column name");
}
Log::Info("Using column number %d as label", label_idx_);
}
}
if (!feature_names_.empty()) {
// erase label column name
feature_names_.erase(feature_names_.begin() + label_idx_);
for (size_t i = 0; i < feature_names_.size(); ++i) {
name2idx[feature_names_[i]] = static_cast<int>(i);
}
}
// load ignore columns
if (config_.ignore_column.size() > 0) {
if (Common::StartsWith(config_.ignore_column, name_prefix)) {
std::string names = config_.ignore_column.substr(name_prefix.size());
for (auto name : Common::Split(names.c_str(), ',')) {
if (name2idx.count(name) > 0) {
int tmp = name2idx[name];
ignore_features_.emplace(tmp);
} else {
Log::Fatal("Could not find ignore column %s in data file", name.c_str());
}
}
} else {
for (auto token : Common::Split(config_.ignore_column.c_str(), ',')) {
int tmp = 0;
if (!Common::AtoiAndCheck(token.c_str(), &tmp)) {
Log::Fatal("ignore_column is not a number,\n"
"if you want to use a column name,\n"
"please add the prefix \"name:\" to the column name");
}
ignore_features_.emplace(tmp);
}
}
}
// load weight idx
if (config_.weight_column.size() > 0) {
if (Common::StartsWith(config_.weight_column, name_prefix)) {
std::string name = config_.weight_column.substr(name_prefix.size());
if (name2idx.count(name) > 0) {
weight_idx_ = name2idx[name];
Log::Info("Using column %s as weight", name.c_str());
} else {
Log::Fatal("Could not find weight column %s in data file", name.c_str());
}
} else {
if (!Common::AtoiAndCheck(config_.weight_column.c_str(), &weight_idx_)) {
Log::Fatal("weight_column is not a number,\n"
"if you want to use a column name,\n"
"please add the prefix \"name:\" to the column name");
}
Log::Info("Using column number %d as weight", weight_idx_);
}
ignore_features_.emplace(weight_idx_);
}
// load group idx
if (config_.group_column.size() > 0) {
if (Common::StartsWith(config_.group_column, name_prefix)) {
std::string name = config_.group_column.substr(name_prefix.size());
if (name2idx.count(name) > 0) {
group_idx_ = name2idx[name];
Log::Info("Using column %s as group/query id", name.c_str());
} else {
Log::Fatal("Could not find group/query column %s in data file", name.c_str());
}
} else {
if (!Common::AtoiAndCheck(config_.group_column.c_str(), &group_idx_)) {
Log::Fatal("group_column is not a number,\n"
"if you want to use a column name,\n"
"please add the prefix \"name:\" to the column name");
}
Log::Info("Using column number %d as group/query id", group_idx_);
}
ignore_features_.emplace(group_idx_);
}
}
if (config_.categorical_feature.size() > 0) {
if (Common::StartsWith(config_.categorical_feature, name_prefix)) {
std::string names = config_.categorical_feature.substr(name_prefix.size());
for (auto name : Common::Split(names.c_str(), ',')) {
if (name2idx.count(name) > 0) {
int tmp = name2idx[name];
categorical_features_.emplace(tmp);
} else {
Log::Fatal("Could not find categorical_feature %s in data file", name.c_str());
}
}
} else {
for (auto token : Common::Split(config_.categorical_feature.c_str(), ',')) {
int tmp = 0;
if (!Common::AtoiAndCheck(token.c_str(), &tmp)) {
Log::Fatal("categorical_feature is not a number,\n"
"if you want to use a column name,\n"
"please add the prefix \"name:\" to the column name");
}
categorical_features_.emplace(tmp);
}
}
}
}
void CheckSampleSize(size_t sample_cnt, size_t num_data) {
if (static_cast<double>(sample_cnt) / num_data < 0.2f &&
sample_cnt < 100000) {
Log::Warning(
"Using too small ``bin_construct_sample_cnt`` may encounter "
"unexpected "
"errors and poor accuracy.");
}
}
Dataset* DatasetLoader::LoadFromFile(const char* filename, int rank, int num_machines) {
// don't support query id in data file when using distributed training
if (num_machines > 1 && !config_.pre_partition) {
if (group_idx_ > 0) {
Log::Fatal("Using a query id without pre-partitioning the data file is not supported for distributed training.\n"
"Please use an additional query file or pre-partition the data");
}
}
auto dataset = std::unique_ptr<Dataset>(new Dataset());
if (store_raw_) {
dataset->SetHasRaw(true);
}
data_size_t num_global_data = 0;
std::vector<data_size_t> used_data_indices;
auto bin_filename = CheckCanLoadFromBin(filename);
bool is_load_from_binary = false;
if (bin_filename.size() == 0) {
auto parser = std::unique_ptr<Parser>(Parser::CreateParser(filename, config_.header, 0, label_idx_,
config_.precise_float_parser));
if (parser == nullptr) {
Log::Fatal("Could not recognize data format of %s", filename);
}
dataset->data_filename_ = filename;
dataset->label_idx_ = label_idx_;
dataset->metadata_.Init(filename);
if (!config_.two_round) {
// read data to memory
auto text_data = LoadTextDataToMemory(filename, dataset->metadata_, rank, num_machines, &num_global_data, &used_data_indices);
dataset->num_data_ = static_cast<data_size_t>(text_data.size());
// sample data
auto sample_data = SampleTextDataFromMemory(text_data);
CheckSampleSize(sample_data.size(),
static_cast<size_t>(dataset->num_data_));
// construct feature bin mappers
ConstructBinMappersFromTextData(rank, num_machines, sample_data, parser.get(), dataset.get());
if (dataset->has_raw()) {
dataset->ResizeRaw(dataset->num_data_);
}
// initialize label
dataset->metadata_.Init(dataset->num_data_, weight_idx_, group_idx_);
// extract features
ExtractFeaturesFromMemory(&text_data, parser.get(), dataset.get());
text_data.clear();
} else {
// sample data from file
auto sample_data = SampleTextDataFromFile(filename, dataset->metadata_, rank, num_machines, &num_global_data, &used_data_indices);
if (used_data_indices.size() > 0) {
dataset->num_data_ = static_cast<data_size_t>(used_data_indices.size());
} else {
dataset->num_data_ = num_global_data;
}
CheckSampleSize(sample_data.size(),
static_cast<size_t>(dataset->num_data_));
// construct feature bin mappers
ConstructBinMappersFromTextData(rank, num_machines, sample_data, parser.get(), dataset.get());
if (dataset->has_raw()) {
dataset->ResizeRaw(dataset->num_data_);
}
// initialize label
dataset->metadata_.Init(dataset->num_data_, weight_idx_, group_idx_);
Log::Info("Making second pass...");
// extract features
ExtractFeaturesFromFile(filename, parser.get(), used_data_indices, dataset.get());
}
} else {
// load data from binary file
is_load_from_binary = true;
Log::Info("Load from binary file %s", bin_filename.c_str());
dataset.reset(LoadFromBinFile(filename, bin_filename.c_str(), rank, num_machines, &num_global_data, &used_data_indices));
}
// check meta data
dataset->metadata_.CheckOrPartition(num_global_data, used_data_indices);
// need to check training data
CheckDataset(dataset.get(), is_load_from_binary);
return dataset.release();
}
Dataset* DatasetLoader::LoadFromFileAlignWithOtherDataset(const char* filename, const Dataset* train_data) {
data_size_t num_global_data = 0;
std::vector<data_size_t> used_data_indices;
auto dataset = std::unique_ptr<Dataset>(new Dataset());
if (store_raw_) {
dataset->SetHasRaw(true);
}
auto bin_filename = CheckCanLoadFromBin(filename);
if (bin_filename.size() == 0) {
auto parser = std::unique_ptr<Parser>(Parser::CreateParser(filename, config_.header, 0, label_idx_,
config_.precise_float_parser));
if (parser == nullptr) {
Log::Fatal("Could not recognize data format of %s", filename);
}
dataset->data_filename_ = filename;
dataset->label_idx_ = label_idx_;
dataset->metadata_.Init(filename);
if (!config_.two_round) {
// read data in memory
auto text_data = LoadTextDataToMemory(filename, dataset->metadata_, 0, 1, &num_global_data, &used_data_indices);
dataset->num_data_ = static_cast<data_size_t>(text_data.size());
// initialize label
dataset->metadata_.Init(dataset->num_data_, weight_idx_, group_idx_);
dataset->CreateValid(train_data);
if (dataset->has_raw()) {
dataset->ResizeRaw(dataset->num_data_);
}
// extract features
ExtractFeaturesFromMemory(&text_data, parser.get(), dataset.get());
text_data.clear();
} else {
TextReader<data_size_t> text_reader(filename, config_.header);
// Get number of lines of data file
dataset->num_data_ = static_cast<data_size_t>(text_reader.CountLine());
num_global_data = dataset->num_data_;
// initialize label
dataset->metadata_.Init(dataset->num_data_, weight_idx_, group_idx_);
dataset->CreateValid(train_data);
if (dataset->has_raw()) {
dataset->ResizeRaw(dataset->num_data_);
}
// extract features
ExtractFeaturesFromFile(filename, parser.get(), used_data_indices, dataset.get());
}
} else {
// load data from binary file
dataset.reset(LoadFromBinFile(filename, bin_filename.c_str(), 0, 1, &num_global_data, &used_data_indices));
}
  // no need to check validation data
// check meta data
dataset->metadata_.CheckOrPartition(num_global_data, used_data_indices);
return dataset.release();
}
Dataset* DatasetLoader::LoadFromBinFile(const char* data_filename, const char* bin_filename,
int rank, int num_machines, int* num_global_data,
std::vector<data_size_t>* used_data_indices) {
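  // Binary layout consumed below: file token, header size, header fields
  // (num_data, feature counts, bin settings, feature maps, group boundaries,
  // feature names, forced bin bounds), metadata size plus metadata, one
  // serialized block per feature group, and finally the raw rows when
  // has_raw_ is set.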
auto dataset = std::unique_ptr<Dataset>(new Dataset());
auto reader = VirtualFileReader::Make(bin_filename);
dataset->data_filename_ = data_filename;
if (!reader->Init()) {
Log::Fatal("Could not read binary data from %s", bin_filename);
}
// buffer to read binary file
size_t buffer_size = 16 * 1024 * 1024;
auto buffer = std::vector<char>(buffer_size);
// check token
size_t size_of_token = std::strlen(Dataset::binary_file_token);
size_t read_cnt = reader->Read(
buffer.data(),
VirtualFileWriter::AlignedSize(sizeof(char) * size_of_token));
if (read_cnt < sizeof(char) * size_of_token) {
Log::Fatal("Binary file error: token has the wrong size");
}
if (std::string(buffer.data()) != std::string(Dataset::binary_file_token)) {
Log::Fatal("Input file is not LightGBM binary file");
}
// read size of header
read_cnt = reader->Read(buffer.data(), sizeof(size_t));
if (read_cnt != sizeof(size_t)) {
Log::Fatal("Binary file error: header has the wrong size");
}
size_t size_of_head = *(reinterpret_cast<size_t*>(buffer.data()));
  // re-allocate space if not enough
if (size_of_head > buffer_size) {
buffer_size = size_of_head;
buffer.resize(buffer_size);
}
// read header
read_cnt = reader->Read(buffer.data(), size_of_head);
if (read_cnt != size_of_head) {
Log::Fatal("Binary file error: header is incorrect");
}
// get header
const char* mem_ptr = buffer.data();
dataset->num_data_ = *(reinterpret_cast<const data_size_t*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(dataset->num_data_));
dataset->num_features_ = *(reinterpret_cast<const int*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(dataset->num_features_));
dataset->num_total_features_ = *(reinterpret_cast<const int*>(mem_ptr));
mem_ptr +=
VirtualFileWriter::AlignedSize(sizeof(dataset->num_total_features_));
dataset->label_idx_ = *(reinterpret_cast<const int*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(dataset->label_idx_));
dataset->max_bin_ = *(reinterpret_cast<const int*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(dataset->max_bin_));
dataset->bin_construct_sample_cnt_ = *(reinterpret_cast<const int*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(
sizeof(dataset->bin_construct_sample_cnt_));
dataset->min_data_in_bin_ = *(reinterpret_cast<const int*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(dataset->min_data_in_bin_));
dataset->use_missing_ = *(reinterpret_cast<const bool*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(dataset->use_missing_));
dataset->zero_as_missing_ = *(reinterpret_cast<const bool*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(dataset->zero_as_missing_));
dataset->has_raw_ = *(reinterpret_cast<const bool*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(dataset->has_raw_));
const int* tmp_feature_map = reinterpret_cast<const int*>(mem_ptr);
dataset->used_feature_map_.clear();
for (int i = 0; i < dataset->num_total_features_; ++i) {
dataset->used_feature_map_.push_back(tmp_feature_map[i]);
}
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(int) *
dataset->num_total_features_);
// num_groups
dataset->num_groups_ = *(reinterpret_cast<const int*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(dataset->num_groups_));
// real_feature_idx_
const int* tmp_ptr_real_feature_idx_ = reinterpret_cast<const int*>(mem_ptr);
dataset->real_feature_idx_.clear();
for (int i = 0; i < dataset->num_features_; ++i) {
dataset->real_feature_idx_.push_back(tmp_ptr_real_feature_idx_[i]);
}
mem_ptr +=
VirtualFileWriter::AlignedSize(sizeof(int) * dataset->num_features_);
// feature2group
const int* tmp_ptr_feature2group = reinterpret_cast<const int*>(mem_ptr);
dataset->feature2group_.clear();
for (int i = 0; i < dataset->num_features_; ++i) {
dataset->feature2group_.push_back(tmp_ptr_feature2group[i]);
}
mem_ptr +=
VirtualFileWriter::AlignedSize(sizeof(int) * dataset->num_features_);
// feature2subfeature
const int* tmp_ptr_feature2subfeature = reinterpret_cast<const int*>(mem_ptr);
dataset->feature2subfeature_.clear();
for (int i = 0; i < dataset->num_features_; ++i) {
dataset->feature2subfeature_.push_back(tmp_ptr_feature2subfeature[i]);
}
mem_ptr +=
VirtualFileWriter::AlignedSize(sizeof(int) * dataset->num_features_);
// group_bin_boundaries
const uint64_t* tmp_ptr_group_bin_boundaries = reinterpret_cast<const uint64_t*>(mem_ptr);
dataset->group_bin_boundaries_.clear();
for (int i = 0; i < dataset->num_groups_ + 1; ++i) {
dataset->group_bin_boundaries_.push_back(tmp_ptr_group_bin_boundaries[i]);
}
mem_ptr += sizeof(uint64_t) * (dataset->num_groups_ + 1);
// group_feature_start_
const int* tmp_ptr_group_feature_start = reinterpret_cast<const int*>(mem_ptr);
dataset->group_feature_start_.clear();
for (int i = 0; i < dataset->num_groups_; ++i) {
dataset->group_feature_start_.push_back(tmp_ptr_group_feature_start[i]);
}
mem_ptr +=
VirtualFileWriter::AlignedSize(sizeof(int) * (dataset->num_groups_));
// group_feature_cnt_
const int* tmp_ptr_group_feature_cnt = reinterpret_cast<const int*>(mem_ptr);
dataset->group_feature_cnt_.clear();
for (int i = 0; i < dataset->num_groups_; ++i) {
dataset->group_feature_cnt_.push_back(tmp_ptr_group_feature_cnt[i]);
}
mem_ptr +=
VirtualFileWriter::AlignedSize(sizeof(int) * (dataset->num_groups_));
if (!config_.max_bin_by_feature.empty()) {
CHECK_EQ(static_cast<size_t>(dataset->num_total_features_), config_.max_bin_by_feature.size());
CHECK_GT(*(std::min_element(config_.max_bin_by_feature.begin(), config_.max_bin_by_feature.end())), 1);
dataset->max_bin_by_feature_.resize(dataset->num_total_features_);
dataset->max_bin_by_feature_.assign(config_.max_bin_by_feature.begin(), config_.max_bin_by_feature.end());
} else {
const int32_t* tmp_ptr_max_bin_by_feature = reinterpret_cast<const int32_t*>(mem_ptr);
dataset->max_bin_by_feature_.clear();
for (int i = 0; i < dataset->num_total_features_; ++i) {
dataset->max_bin_by_feature_.push_back(tmp_ptr_max_bin_by_feature[i]);
}
}
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(int32_t) *
(dataset->num_total_features_));
if (ArrayArgs<int32_t>::CheckAll(dataset->max_bin_by_feature_, -1)) {
dataset->max_bin_by_feature_.clear();
}
// get feature names
dataset->feature_names_.clear();
  // read feature names
for (int i = 0; i < dataset->num_total_features_; ++i) {
int str_len = *(reinterpret_cast<const int*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(int));
std::stringstream str_buf;
auto tmp_arr = reinterpret_cast<const char*>(mem_ptr);
for (int j = 0; j < str_len; ++j) {
char tmp_char = tmp_arr[j];
str_buf << tmp_char;
}
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(char) * str_len);
dataset->feature_names_.emplace_back(str_buf.str());
}
// get forced_bin_bounds_
dataset->forced_bin_bounds_ = std::vector<std::vector<double>>(dataset->num_total_features_, std::vector<double>());
for (int i = 0; i < dataset->num_total_features_; ++i) {
int num_bounds = *(reinterpret_cast<const int*>(mem_ptr));
mem_ptr += VirtualFileWriter::AlignedSize(sizeof(int));
dataset->forced_bin_bounds_[i] = std::vector<double>();
const double* tmp_ptr_forced_bounds =
reinterpret_cast<const double*>(mem_ptr);
for (int j = 0; j < num_bounds; ++j) {
double bound = tmp_ptr_forced_bounds[j];
dataset->forced_bin_bounds_[i].push_back(bound);
}
mem_ptr += num_bounds * sizeof(double);
}
// read size of meta data
read_cnt = reader->Read(buffer.data(), sizeof(size_t));
if (read_cnt != sizeof(size_t)) {
Log::Fatal("Binary file error: meta data has the wrong size");
}
size_t size_of_metadata = *(reinterpret_cast<size_t*>(buffer.data()));
// re-allocate space if not enough
if (size_of_metadata > buffer_size) {
buffer_size = size_of_metadata;
buffer.resize(buffer_size);
}
// read meta data
read_cnt = reader->Read(buffer.data(), size_of_metadata);
if (read_cnt != size_of_metadata) {
Log::Fatal("Binary file error: meta data is incorrect");
}
// load meta data
dataset->metadata_.LoadFromMemory(buffer.data());
*num_global_data = dataset->num_data_;
used_data_indices->clear();
  // sample locally used data if the data needs to be partitioned
if (num_machines > 1 && !config_.pre_partition) {
const data_size_t* query_boundaries = dataset->metadata_.query_boundaries();
if (query_boundaries == nullptr) {
      // if there is no query file, the minimal sample unit is one record
for (data_size_t i = 0; i < dataset->num_data_; ++i) {
if (random_.NextShort(0, num_machines) == rank) {
used_data_indices->push_back(i);
}
}
} else {
      // if there is a query file, the minimal sample unit is one query
data_size_t num_queries = dataset->metadata_.num_queries();
data_size_t qid = -1;
bool is_query_used = false;
for (data_size_t i = 0; i < dataset->num_data_; ++i) {
if (qid >= num_queries) {
Log::Fatal("Current query exceeds the range of the query file,\n"
"please ensure the query file is correct");
}
if (i >= query_boundaries[qid + 1]) {
          // if this is a new query
is_query_used = false;
if (random_.NextShort(0, num_machines) == rank) {
is_query_used = true;
}
++qid;
}
if (is_query_used) {
used_data_indices->push_back(i);
}
}
}
dataset->num_data_ = static_cast<data_size_t>((*used_data_indices).size());
}
dataset->metadata_.PartitionLabel(*used_data_indices);
// read feature data
for (int i = 0; i < dataset->num_groups_; ++i) {
// read feature size
read_cnt = reader->Read(buffer.data(), sizeof(size_t));
if (read_cnt != sizeof(size_t)) {
Log::Fatal("Binary file error: feature %d has the wrong size", i);
}
size_t size_of_feature = *(reinterpret_cast<size_t*>(buffer.data()));
// re-allocate space if not enough
if (size_of_feature > buffer_size) {
buffer_size = size_of_feature;
buffer.resize(buffer_size);
}
read_cnt = reader->Read(buffer.data(), size_of_feature);
if (read_cnt != size_of_feature) {
Log::Fatal("Binary file error: feature %d is incorrect, read count: %d", i, read_cnt);
}
dataset->feature_groups_.emplace_back(std::unique_ptr<FeatureGroup>(
new FeatureGroup(buffer.data(),
*num_global_data,
*used_data_indices, i)));
}
dataset->feature_groups_.shrink_to_fit();
// raw data
dataset->numeric_feature_map_ = std::vector<int>(dataset->num_features_, false);
dataset->num_numeric_features_ = 0;
for (int i = 0; i < dataset->num_features_; ++i) {
if (dataset->FeatureBinMapper(i)->bin_type() == BinType::CategoricalBin) {
dataset->numeric_feature_map_[i] = -1;
} else {
dataset->numeric_feature_map_[i] = dataset->num_numeric_features_;
++dataset->num_numeric_features_;
}
}
if (dataset->has_raw()) {
dataset->ResizeRaw(dataset->num_data());
size_t row_size = dataset->num_numeric_features_ * sizeof(float);
if (row_size > buffer_size) {
buffer_size = row_size;
buffer.resize(buffer_size);
}
for (int i = 0; i < dataset->num_data(); ++i) {
read_cnt = reader->Read(buffer.data(), row_size);
if (read_cnt != row_size) {
Log::Fatal("Binary file error: row %d of raw data is incorrect, read count: %d", i, read_cnt);
}
mem_ptr = buffer.data();
const float* tmp_ptr_raw_row = reinterpret_cast<const float*>(mem_ptr);
for (int j = 0; j < dataset->num_features(); ++j) {
int feat_ind = dataset->numeric_feature_map_[j];
if (feat_ind >= 0) {
dataset->raw_data_[feat_ind][i] = tmp_ptr_raw_row[feat_ind];
}
}
mem_ptr += row_size;
}
}
dataset->is_finish_load_ = true;
return dataset.release();
}
Dataset* DatasetLoader::ConstructFromSampleData(double** sample_values,
int** sample_indices, int num_col, const int* num_per_col,
size_t total_sample_size, data_size_t num_data) {
CheckSampleSize(total_sample_size, static_cast<size_t>(num_data));
int num_total_features = num_col;
if (Network::num_machines() > 1) {
num_total_features = Network::GlobalSyncUpByMax(num_total_features);
}
std::vector<std::unique_ptr<BinMapper>> bin_mappers(num_total_features);
  // fill feature_names_ if there is no header
if (feature_names_.empty()) {
for (int i = 0; i < num_col; ++i) {
std::stringstream str_buf;
str_buf << "Column_" << i;
feature_names_.push_back(str_buf.str());
}
}
if (!config_.max_bin_by_feature.empty()) {
CHECK_EQ(static_cast<size_t>(num_col), config_.max_bin_by_feature.size());
CHECK_GT(*(std::min_element(config_.max_bin_by_feature.begin(), config_.max_bin_by_feature.end())), 1);
}
// get forced split
std::string forced_bins_path = config_.forcedbins_filename;
std::vector<std::vector<double>> forced_bin_bounds = DatasetLoader::GetForcedBins(forced_bins_path, num_col, categorical_features_);
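  // scale min_data_in_leaf from the full dataset down to the sampled subset,
  // so FindBin filters with a threshold proportional to the sample size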
const data_size_t filter_cnt = static_cast<data_size_t>(
static_cast<double>(config_.min_data_in_leaf * total_sample_size) / num_data);
if (Network::num_machines() == 1) {
// if only one machine, find bin locally
OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
for (int i = 0; i < num_col; ++i) {
OMP_LOOP_EX_BEGIN();
if (ignore_features_.count(i) > 0) {
bin_mappers[i] = nullptr;
continue;
}
BinType bin_type = BinType::NumericalBin;
if (categorical_features_.count(i)) {
bin_type = BinType::CategoricalBin;
bool feat_is_unconstrained = ((config_.monotone_constraints.size() == 0) || (config_.monotone_constraints[i] == 0));
if (!feat_is_unconstrained) {
Log::Fatal("The output cannot be monotone with respect to categorical features");
}
}
bin_mappers[i].reset(new BinMapper());
if (config_.max_bin_by_feature.empty()) {
bin_mappers[i]->FindBin(sample_values[i], num_per_col[i], total_sample_size,
config_.max_bin, config_.min_data_in_bin, filter_cnt, config_.feature_pre_filter,
bin_type, config_.use_missing, config_.zero_as_missing,
forced_bin_bounds[i]);
} else {
bin_mappers[i]->FindBin(sample_values[i], num_per_col[i], total_sample_size,
config_.max_bin_by_feature[i], config_.min_data_in_bin,
filter_cnt, config_.feature_pre_filter, bin_type, config_.use_missing,
config_.zero_as_missing, forced_bin_bounds[i]);
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
} else {
    // if there are multiple machines, bin finding needs to be distributed
    // different machines will find bins for different features
int num_machines = Network::num_machines();
int rank = Network::rank();
    // start and len store the feature index ranges assigned to the different machines
    // machine i will find bins for features in [ start[i], start[i] + len[i] )
std::vector<int> start(num_machines);
std::vector<int> len(num_machines);
int step = (num_total_features + num_machines - 1) / num_machines;
if (step < 1) { step = 1; }
start[0] = 0;
for (int i = 0; i < num_machines - 1; ++i) {
len[i] = std::min(step, num_total_features - start[i]);
start[i + 1] = start[i] + len[i];
}
len[num_machines - 1] = num_total_features - start[num_machines - 1];
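    // e.g. with 10 features and 3 machines: step = 4, so machine 0 gets
    // features [0, 4), machine 1 gets [4, 8) and machine 2 gets [8, 10)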
OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
for (int i = 0; i < len[rank]; ++i) {
OMP_LOOP_EX_BEGIN();
if (ignore_features_.count(start[rank] + i) > 0) {
continue;
}
BinType bin_type = BinType::NumericalBin;
if (categorical_features_.count(start[rank] + i)) {
bin_type = BinType::CategoricalBin;
}
bin_mappers[i].reset(new BinMapper());
if (num_col <= start[rank] + i) {
continue;
}
if (config_.max_bin_by_feature.empty()) {
bin_mappers[i]->FindBin(sample_values[start[rank] + i], num_per_col[start[rank] + i],
total_sample_size, config_.max_bin, config_.min_data_in_bin,
filter_cnt, config_.feature_pre_filter, bin_type, config_.use_missing, config_.zero_as_missing,
forced_bin_bounds[i]);
} else {
bin_mappers[i]->FindBin(sample_values[start[rank] + i], num_per_col[start[rank] + i],
total_sample_size, config_.max_bin_by_feature[start[rank] + i],
config_.min_data_in_bin, filter_cnt, config_.feature_pre_filter, bin_type, config_.use_missing,
config_.zero_as_missing, forced_bin_bounds[i]);
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
comm_size_t self_buf_size = 0;
for (int i = 0; i < len[rank]; ++i) {
if (ignore_features_.count(start[rank] + i) > 0) {
continue;
}
self_buf_size += static_cast<comm_size_t>(bin_mappers[i]->SizesInByte());
}
std::vector<char> input_buffer(self_buf_size);
auto cp_ptr = input_buffer.data();
for (int i = 0; i < len[rank]; ++i) {
if (ignore_features_.count(start[rank] + i) > 0) {
continue;
}
bin_mappers[i]->CopyTo(cp_ptr);
cp_ptr += bin_mappers[i]->SizesInByte();
// free
bin_mappers[i].reset(nullptr);
}
std::vector<comm_size_t> size_len = Network::GlobalArray(self_buf_size);
std::vector<comm_size_t> size_start(num_machines, 0);
for (int i = 1; i < num_machines; ++i) {
size_start[i] = size_start[i - 1] + size_len[i - 1];
}
comm_size_t total_buffer_size = size_start[num_machines - 1] + size_len[num_machines - 1];
std::vector<char> output_buffer(total_buffer_size);
// gather global feature bin mappers
Network::Allgather(input_buffer.data(), size_start.data(), size_len.data(), output_buffer.data(), total_buffer_size);
cp_ptr = output_buffer.data();
// restore features bins from buffer
for (int i = 0; i < num_total_features; ++i) {
if (ignore_features_.count(i) > 0) {
bin_mappers[i] = nullptr;
continue;
}
bin_mappers[i].reset(new BinMapper());
bin_mappers[i]->CopyFrom(cp_ptr);
cp_ptr += bin_mappers[i]->SizesInByte();
}
}
auto dataset = std::unique_ptr<Dataset>(new Dataset(num_data));
dataset->Construct(&bin_mappers, num_total_features, forced_bin_bounds, sample_indices, sample_values, num_per_col, num_col, total_sample_size, config_);
if (dataset->has_raw()) {
dataset->ResizeRaw(num_data);
}
dataset->set_feature_names(feature_names_);
return dataset.release();
}
// ---- private functions ----
void DatasetLoader::CheckDataset(const Dataset* dataset, bool is_load_from_binary) {
if (dataset->num_data_ <= 0) {
Log::Fatal("Data file %s is empty", dataset->data_filename_.c_str());
}
if (dataset->feature_names_.size() != static_cast<size_t>(dataset->num_total_features_)) {
Log::Fatal("Size of feature name error, should be %d, got %d", dataset->num_total_features_,
static_cast<int>(dataset->feature_names_.size()));
}
bool is_feature_order_by_group = true;
int last_group = -1;
int last_sub_feature = -1;
  // if features are ordered, there is no need to use hist_buf
for (int i = 0; i < dataset->num_features_; ++i) {
int group = dataset->feature2group_[i];
int sub_feature = dataset->feature2subfeature_[i];
if (group < last_group) {
is_feature_order_by_group = false;
} else if (group == last_group) {
if (sub_feature <= last_sub_feature) {
is_feature_order_by_group = false;
break;
}
}
last_group = group;
last_sub_feature = sub_feature;
}
if (!is_feature_order_by_group) {
Log::Fatal("Features in dataset should be ordered by group");
}
if (is_load_from_binary) {
if (dataset->max_bin_ != config_.max_bin) {
Log::Fatal("Dataset max_bin %d != config %d", dataset->max_bin_, config_.max_bin);
}
if (dataset->min_data_in_bin_ != config_.min_data_in_bin) {
Log::Fatal("Dataset min_data_in_bin %d != config %d", dataset->min_data_in_bin_, config_.min_data_in_bin);
}
if (dataset->use_missing_ != config_.use_missing) {
Log::Fatal("Dataset use_missing %d != config %d", dataset->use_missing_, config_.use_missing);
}
if (dataset->zero_as_missing_ != config_.zero_as_missing) {
Log::Fatal("Dataset zero_as_missing %d != config %d", dataset->zero_as_missing_, config_.zero_as_missing);
}
if (dataset->bin_construct_sample_cnt_ != config_.bin_construct_sample_cnt) {
Log::Fatal("Dataset bin_construct_sample_cnt %d != config %d", dataset->bin_construct_sample_cnt_, config_.bin_construct_sample_cnt);
}
if ((dataset->max_bin_by_feature_.size() != config_.max_bin_by_feature.size()) ||
!std::equal(dataset->max_bin_by_feature_.begin(), dataset->max_bin_by_feature_.end(),
config_.max_bin_by_feature.begin())) {
Log::Fatal("Dataset max_bin_by_feature does not match with config");
}
int label_idx = -1;
if (Common::AtoiAndCheck(config_.label_column.c_str(), &label_idx)) {
if (dataset->label_idx_ != label_idx) {
Log::Fatal("Dataset label_idx %d != config %d", dataset->label_idx_, label_idx);
}
} else {
Log::Info("Recommend use integer for label index when loading data from binary for sanity check.");
}
}
}
std::vector<std::string> DatasetLoader::LoadTextDataToMemory(const char* filename, const Metadata& metadata,
int rank, int num_machines, int* num_global_data,
std::vector<data_size_t>* used_data_indices) {
TextReader<data_size_t> text_reader(filename, config_.header, config_.file_load_progress_interval_bytes);
used_data_indices->clear();
if (num_machines == 1 || config_.pre_partition) {
// read all lines
*num_global_data = text_reader.ReadAllLines();
} else { // need partition data
// get query data
const data_size_t* query_boundaries = metadata.query_boundaries();
if (query_boundaries == nullptr) {
// if there is no query data, the minimal sample unit is one record
*num_global_data = text_reader.ReadAndFilterLines([this, rank, num_machines](data_size_t) {
if (random_.NextShort(0, num_machines) == rank) {
return true;
} else {
return false;
}
}, used_data_indices);
} else {
// if there is query data, the minimal sample unit is one query
data_size_t num_queries = metadata.num_queries();
data_size_t qid = -1;
bool is_query_used = false;
*num_global_data = text_reader.ReadAndFilterLines(
[this, rank, num_machines, &qid, &query_boundaries, &is_query_used, num_queries]
(data_size_t line_idx) {
if (qid >= num_queries) {
Log::Fatal("Current query exceeds the range of the query file,\n"
"please ensure the query file is correct");
}
if (line_idx >= query_boundaries[qid + 1]) {
// if this is a new query
is_query_used = false;
if (random_.NextShort(0, num_machines) == rank) {
is_query_used = true;
}
++qid;
}
return is_query_used;
}, used_data_indices);
}
}
return std::move(text_reader.Lines());
}
std::vector<std::string> DatasetLoader::SampleTextDataFromMemory(const std::vector<std::string>& data) {
int sample_cnt = config_.bin_construct_sample_cnt;
if (static_cast<size_t>(sample_cnt) > data.size()) {
sample_cnt = static_cast<int>(data.size());
}
auto sample_indices = random_.Sample(static_cast<int>(data.size()), sample_cnt);
std::vector<std::string> out(sample_indices.size());
for (size_t i = 0; i < sample_indices.size(); ++i) {
const size_t idx = sample_indices[i];
out[i] = data[idx];
}
return out;
}
std::vector<std::string> DatasetLoader::SampleTextDataFromFile(const char* filename, const Metadata& metadata,
int rank, int num_machines, int* num_global_data,
std::vector<data_size_t>* used_data_indices) {
const data_size_t sample_cnt = static_cast<data_size_t>(config_.bin_construct_sample_cnt);
TextReader<data_size_t> text_reader(filename, config_.header, config_.file_load_progress_interval_bytes);
std::vector<std::string> out_data;
if (num_machines == 1 || config_.pre_partition) {
*num_global_data = static_cast<data_size_t>(text_reader.SampleFromFile(&random_, sample_cnt, &out_data));
} else { // need partition data
// get query data
const data_size_t* query_boundaries = metadata.query_boundaries();
if (query_boundaries == nullptr) {
// if there is no query file, the minimal sample unit is one record
*num_global_data = text_reader.SampleAndFilterFromFile([this, rank, num_machines]
(data_size_t) {
if (random_.NextShort(0, num_machines) == rank) {
return true;
} else {
return false;
}
}, used_data_indices, &random_, sample_cnt, &out_data);
} else {
// if there is a query file, the minimal sample unit is one query
data_size_t num_queries = metadata.num_queries();
data_size_t qid = -1;
bool is_query_used = false;
*num_global_data = text_reader.SampleAndFilterFromFile(
[this, rank, num_machines, &qid, &query_boundaries, &is_query_used, num_queries]
(data_size_t line_idx) {
if (qid >= num_queries) {
Log::Fatal("Query id exceeds the range of the query file, "
"please ensure the query file is correct");
}
if (line_idx >= query_boundaries[qid + 1]) {
// if this is a new query
is_query_used = false;
if (random_.NextShort(0, num_machines) == rank) {
is_query_used = true;
}
++qid;
}
return is_query_used;
}, used_data_indices, &random_, sample_cnt, &out_data);
}
}
return out_data;
}
void DatasetLoader::ConstructBinMappersFromTextData(int rank, int num_machines,
const std::vector<std::string>& sample_data,
const Parser* parser, Dataset* dataset) {
auto t1 = std::chrono::high_resolution_clock::now();
std::vector<std::vector<double>> sample_values;
std::vector<std::vector<int>> sample_indices;
std::vector<std::pair<int, double>> oneline_features;
double label;
for (int i = 0; i < static_cast<int>(sample_data.size()); ++i) {
oneline_features.clear();
// parse features
parser->ParseOneLine(sample_data[i].c_str(), &oneline_features, &label);
for (std::pair<int, double>& inner_data : oneline_features) {
if (static_cast<size_t>(inner_data.first) >= sample_values.size()) {
sample_values.resize(inner_data.first + 1);
sample_indices.resize(inner_data.first + 1);
}
if (std::fabs(inner_data.second) > kZeroThreshold || std::isnan(inner_data.second)) {
sample_values[inner_data.first].emplace_back(inner_data.second);
sample_indices[inner_data.first].emplace_back(i);
}
}
}
dataset->feature_groups_.clear();
dataset->num_total_features_ = std::max(static_cast<int>(sample_values.size()), parser->NumFeatures());
if (num_machines > 1) {
dataset->num_total_features_ = Network::GlobalSyncUpByMax(dataset->num_total_features_);
}
if (!feature_names_.empty()) {
CHECK_EQ(dataset->num_total_features_, static_cast<int>(feature_names_.size()));
}
if (!config_.max_bin_by_feature.empty()) {
CHECK_EQ(static_cast<size_t>(dataset->num_total_features_), config_.max_bin_by_feature.size());
CHECK_GT(*(std::min_element(config_.max_bin_by_feature.begin(), config_.max_bin_by_feature.end())), 1);
}
// get forced split
std::string forced_bins_path = config_.forcedbins_filename;
std::vector<std::vector<double>> forced_bin_bounds = DatasetLoader::GetForcedBins(forced_bins_path,
dataset->num_total_features_,
categorical_features_);
// check the range of label_idx, weight_idx and group_idx
CHECK(label_idx_ >= 0 && label_idx_ <= dataset->num_total_features_);
CHECK(weight_idx_ < 0 || weight_idx_ < dataset->num_total_features_);
CHECK(group_idx_ < 0 || group_idx_ < dataset->num_total_features_);
// fill feature_names_ if the data has no header
if (feature_names_.empty()) {
for (int i = 0; i < dataset->num_total_features_; ++i) {
std::stringstream str_buf;
str_buf << "Column_" << i;
feature_names_.push_back(str_buf.str());
}
}
dataset->set_feature_names(feature_names_);
std::vector<std::unique_ptr<BinMapper>> bin_mappers(dataset->num_total_features_);
const data_size_t filter_cnt = static_cast<data_size_t>(
static_cast<double>(config_.min_data_in_leaf* sample_data.size()) / dataset->num_data_);
// start find bins
if (num_machines == 1) {
// if only one machine, find bin locally
OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
for (int i = 0; i < static_cast<int>(sample_values.size()); ++i) {
OMP_LOOP_EX_BEGIN();
if (ignore_features_.count(i) > 0) {
bin_mappers[i] = nullptr;
continue;
}
BinType bin_type = BinType::NumericalBin;
if (categorical_features_.count(i)) {
bin_type = BinType::CategoricalBin;
}
bin_mappers[i].reset(new BinMapper());
if (config_.max_bin_by_feature.empty()) {
bin_mappers[i]->FindBin(sample_values[i].data(), static_cast<int>(sample_values[i].size()),
sample_data.size(), config_.max_bin, config_.min_data_in_bin,
filter_cnt, config_.feature_pre_filter, bin_type, config_.use_missing, config_.zero_as_missing,
forced_bin_bounds[i]);
} else {
bin_mappers[i]->FindBin(sample_values[i].data(), static_cast<int>(sample_values[i].size()),
sample_data.size(), config_.max_bin_by_feature[i],
config_.min_data_in_bin, filter_cnt, config_.feature_pre_filter, bin_type, config_.use_missing,
config_.zero_as_missing, forced_bin_bounds[i]);
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
} else {
// start and len store the ranges of feature indices processed by different machines
// machine i will find bins for features in [ start[i], start[i] + len[i] )
std::vector<int> start(num_machines);
std::vector<int> len(num_machines);
int step = (dataset->num_total_features_ + num_machines - 1) / num_machines;
if (step < 1) { step = 1; }
start[0] = 0;
for (int i = 0; i < num_machines - 1; ++i) {
len[i] = std::min(step, dataset->num_total_features_ - start[i]);
start[i + 1] = start[i] + len[i];
}
len[num_machines - 1] = dataset->num_total_features_ - start[num_machines - 1];
OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
for (int i = 0; i < len[rank]; ++i) {
OMP_LOOP_EX_BEGIN();
if (ignore_features_.count(start[rank] + i) > 0) {
continue;
}
BinType bin_type = BinType::NumericalBin;
if (categorical_features_.count(start[rank] + i)) {
bin_type = BinType::CategoricalBin;
}
bin_mappers[i].reset(new BinMapper());
if (static_cast<int>(sample_values.size()) <= start[rank] + i) {
continue;
}
if (config_.max_bin_by_feature.empty()) {
bin_mappers[i]->FindBin(sample_values[start[rank] + i].data(),
static_cast<int>(sample_values[start[rank] + i].size()),
sample_data.size(), config_.max_bin, config_.min_data_in_bin,
filter_cnt, config_.feature_pre_filter, bin_type, config_.use_missing, config_.zero_as_missing,
forced_bin_bounds[i]);
} else {
bin_mappers[i]->FindBin(sample_values[start[rank] + i].data(),
static_cast<int>(sample_values[start[rank] + i].size()),
sample_data.size(), config_.max_bin_by_feature[i],
config_.min_data_in_bin, filter_cnt, config_.feature_pre_filter, bin_type,
config_.use_missing, config_.zero_as_missing, forced_bin_bounds[i]);
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
comm_size_t self_buf_size = 0;
for (int i = 0; i < len[rank]; ++i) {
if (ignore_features_.count(start[rank] + i) > 0) {
continue;
}
self_buf_size += static_cast<comm_size_t>(bin_mappers[i]->SizesInByte());
}
std::vector<char> input_buffer(self_buf_size);
auto cp_ptr = input_buffer.data();
for (int i = 0; i < len[rank]; ++i) {
if (ignore_features_.count(start[rank] + i) > 0) {
continue;
}
bin_mappers[i]->CopyTo(cp_ptr);
cp_ptr += bin_mappers[i]->SizesInByte();
// free
bin_mappers[i].reset(nullptr);
}
std::vector<comm_size_t> size_len = Network::GlobalArray(self_buf_size);
std::vector<comm_size_t> size_start(num_machines, 0);
for (int i = 1; i < num_machines; ++i) {
size_start[i] = size_start[i - 1] + size_len[i - 1];
}
comm_size_t total_buffer_size = size_start[num_machines - 1] + size_len[num_machines - 1];
std::vector<char> output_buffer(total_buffer_size);
// gather global feature bin mappers
Network::Allgather(input_buffer.data(), size_start.data(), size_len.data(), output_buffer.data(), total_buffer_size);
cp_ptr = output_buffer.data();
// restore feature bin mappers from the buffer
for (int i = 0; i < dataset->num_total_features_; ++i) {
if (ignore_features_.count(i) > 0) {
bin_mappers[i] = nullptr;
continue;
}
bin_mappers[i].reset(new BinMapper());
bin_mappers[i]->CopyFrom(cp_ptr);
cp_ptr += bin_mappers[i]->SizesInByte();
}
}
dataset->Construct(&bin_mappers, dataset->num_total_features_, forced_bin_bounds, Common::Vector2Ptr<int>(&sample_indices).data(),
Common::Vector2Ptr<double>(&sample_values).data(),
Common::VectorSize<int>(sample_indices).data(), static_cast<int>(sample_indices.size()), sample_data.size(), config_);
if (dataset->has_raw()) {
dataset->ResizeRaw(static_cast<int>(sample_data.size()));
}
auto t2 = std::chrono::high_resolution_clock::now();
Log::Info("Construct bin mappers from text data time %.2f seconds",
std::chrono::duration<double, std::milli>(t2 - t1) * 1e-3);
}
/*! \brief Extract local features from memory */
void DatasetLoader::ExtractFeaturesFromMemory(std::vector<std::string>* text_data, const Parser* parser, Dataset* dataset) {
std::vector<std::pair<int, double>> oneline_features;
double tmp_label = 0.0f;
auto& ref_text_data = *text_data;
std::vector<float> feature_row(dataset->num_features_);
if (!predict_fun_) {
OMP_INIT_EX();
// if prediction with an initial model is not needed
#pragma omp parallel for schedule(static) private(oneline_features) firstprivate(tmp_label, feature_row)
for (data_size_t i = 0; i < dataset->num_data_; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
oneline_features.clear();
// parser
parser->ParseOneLine(ref_text_data[i].c_str(), &oneline_features, &tmp_label);
// set label
dataset->metadata_.SetLabelAt(i, static_cast<label_t>(tmp_label));
// free processed line:
ref_text_data[i].clear();
// shrink_to_fit is very slow on Linux and does not seem to free memory, so it is disabled for now
// text_reader_->Lines()[i].shrink_to_fit();
std::vector<bool> is_feature_added(dataset->num_features_, false);
// push data
for (auto& inner_data : oneline_features) {
if (inner_data.first >= dataset->num_total_features_) { continue; }
int feature_idx = dataset->used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
is_feature_added[feature_idx] = true;
// if is used feature
int group = dataset->feature2group_[feature_idx];
int sub_feature = dataset->feature2subfeature_[feature_idx];
dataset->feature_groups_[group]->PushData(tid, sub_feature, i, inner_data.second);
if (dataset->has_raw()) {
feature_row[feature_idx] = static_cast<float>(inner_data.second);
}
} else {
if (inner_data.first == weight_idx_) {
dataset->metadata_.SetWeightAt(i, static_cast<label_t>(inner_data.second));
} else if (inner_data.first == group_idx_) {
dataset->metadata_.SetQueryAt(i, static_cast<data_size_t>(inner_data.second));
}
}
}
if (dataset->has_raw()) {
for (size_t j = 0; j < feature_row.size(); ++j) {
int feat_ind = dataset->numeric_feature_map_[j];
if (feat_ind >= 0) {
dataset->raw_data_[feat_ind][i] = feature_row[j];
}
}
}
dataset->FinishOneRow(tid, i, is_feature_added);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
} else {
OMP_INIT_EX();
// if prediction with an initial model is needed
std::vector<double> init_score(dataset->num_data_ * num_class_);
#pragma omp parallel for schedule(static) private(oneline_features) firstprivate(tmp_label, feature_row)
for (data_size_t i = 0; i < dataset->num_data_; ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
oneline_features.clear();
// parser
parser->ParseOneLine(ref_text_data[i].c_str(), &oneline_features, &tmp_label);
// set initial score
std::vector<double> oneline_init_score(num_class_);
predict_fun_(oneline_features, oneline_init_score.data());
for (int k = 0; k < num_class_; ++k) {
init_score[k * dataset->num_data_ + i] = static_cast<double>(oneline_init_score[k]);
}
// set label
dataset->metadata_.SetLabelAt(i, static_cast<label_t>(tmp_label));
// free processed line:
ref_text_data[i].clear();
// shrink_to_fit is very slow on Linux and does not seem to free memory, so it is disabled for now
// text_reader_->Lines()[i].shrink_to_fit();
// push data
std::vector<bool> is_feature_added(dataset->num_features_, false);
for (auto& inner_data : oneline_features) {
if (inner_data.first >= dataset->num_total_features_) { continue; }
int feature_idx = dataset->used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
is_feature_added[feature_idx] = true;
// if is used feature
int group = dataset->feature2group_[feature_idx];
int sub_feature = dataset->feature2subfeature_[feature_idx];
dataset->feature_groups_[group]->PushData(tid, sub_feature, i, inner_data.second);
if (dataset->has_raw()) {
feature_row[feature_idx] = static_cast<float>(inner_data.second);
}
} else {
if (inner_data.first == weight_idx_) {
dataset->metadata_.SetWeightAt(i, static_cast<label_t>(inner_data.second));
} else if (inner_data.first == group_idx_) {
dataset->metadata_.SetQueryAt(i, static_cast<data_size_t>(inner_data.second));
}
}
}
dataset->FinishOneRow(tid, i, is_feature_added);
if (dataset->has_raw()) {
for (size_t j = 0; j < feature_row.size(); ++j) {
int feat_ind = dataset->numeric_feature_map_[j];
if (feat_ind >= 0) {
dataset->raw_data_[feat_ind][i] = feature_row[j];
}
}
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
// metadata_ will manage space of init_score
dataset->metadata_.SetInitScore(init_score.data(), dataset->num_data_ * num_class_);
}
dataset->FinishLoad();
// text data can be freed after the feature values are loaded
text_data->clear();
}
/*! \brief Extract local features from file */
void DatasetLoader::ExtractFeaturesFromFile(const char* filename, const Parser* parser,
const std::vector<data_size_t>& used_data_indices, Dataset* dataset) {
std::vector<double> init_score;
if (predict_fun_) {
init_score = std::vector<double>(dataset->num_data_ * num_class_);
}
std::function<void(data_size_t, const std::vector<std::string>&)> process_fun =
[this, &init_score, &parser, &dataset]
(data_size_t start_idx, const std::vector<std::string>& lines) {
std::vector<std::pair<int, double>> oneline_features;
double tmp_label = 0.0f;
std::vector<float> feature_row(dataset->num_features_);
OMP_INIT_EX();
#pragma omp parallel for schedule(static) private(oneline_features) firstprivate(tmp_label, feature_row)
for (data_size_t i = 0; i < static_cast<data_size_t>(lines.size()); ++i) {
OMP_LOOP_EX_BEGIN();
const int tid = omp_get_thread_num();
oneline_features.clear();
// parser
parser->ParseOneLine(lines[i].c_str(), &oneline_features, &tmp_label);
// set initial score
if (!init_score.empty()) {
std::vector<double> oneline_init_score(num_class_);
predict_fun_(oneline_features, oneline_init_score.data());
for (int k = 0; k < num_class_; ++k) {
init_score[k * dataset->num_data_ + start_idx + i] = static_cast<double>(oneline_init_score[k]);
}
}
// set label
dataset->metadata_.SetLabelAt(start_idx + i, static_cast<label_t>(tmp_label));
std::vector<bool> is_feature_added(dataset->num_features_, false);
// push data
for (auto& inner_data : oneline_features) {
if (inner_data.first >= dataset->num_total_features_) { continue; }
int feature_idx = dataset->used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
is_feature_added[feature_idx] = true;
// if is used feature
int group = dataset->feature2group_[feature_idx];
int sub_feature = dataset->feature2subfeature_[feature_idx];
dataset->feature_groups_[group]->PushData(tid, sub_feature, start_idx + i, inner_data.second);
if (dataset->has_raw()) {
feature_row[feature_idx] = static_cast<float>(inner_data.second);
}
} else {
if (inner_data.first == weight_idx_) {
dataset->metadata_.SetWeightAt(start_idx + i, static_cast<label_t>(inner_data.second));
} else if (inner_data.first == group_idx_) {
dataset->metadata_.SetQueryAt(start_idx + i, static_cast<data_size_t>(inner_data.second));
}
}
}
if (dataset->has_raw()) {
for (size_t j = 0; j < feature_row.size(); ++j) {
int feat_ind = dataset->numeric_feature_map_[j];
if (feat_ind >= 0) {
dataset->raw_data_[feat_ind][i] = feature_row[j];
}
}
}
dataset->FinishOneRow(tid, i, is_feature_added);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
};
TextReader<data_size_t> text_reader(filename, config_.header, config_.file_load_progress_interval_bytes);
if (!used_data_indices.empty()) {
// only need part of data
text_reader.ReadPartAndProcessParallel(used_data_indices, process_fun);
} else {
// need full data
text_reader.ReadAllAndProcessParallel(process_fun);
}
// metadata_ will manage space of init_score
if (!init_score.empty()) {
dataset->metadata_.SetInitScore(init_score.data(), dataset->num_data_ * num_class_);
}
dataset->FinishLoad();
}
/*! \brief Check whether data can be loaded from a binary file */
std::string DatasetLoader::CheckCanLoadFromBin(const char* filename) {
std::string bin_filename(filename);
bin_filename.append(".bin");
auto reader = VirtualFileReader::Make(bin_filename.c_str());
if (!reader->Init()) {
bin_filename = std::string(filename);
reader = VirtualFileReader::Make(bin_filename.c_str());
if (!reader->Init()) {
Log::Fatal("Cannot open data file %s", bin_filename.c_str());
}
}
size_t buffer_size = 256;
auto buffer = std::vector<char>(buffer_size);
// read size of token
size_t size_of_token = std::strlen(Dataset::binary_file_token);
size_t read_cnt = reader->Read(buffer.data(), size_of_token);
if (read_cnt == size_of_token
&& std::string(buffer.data()) == std::string(Dataset::binary_file_token)) {
return bin_filename;
} else {
return std::string();
}
}
std::vector<std::vector<double>> DatasetLoader::GetForcedBins(std::string forced_bins_path, int num_total_features,
const std::unordered_set<int>& categorical_features) {
std::vector<std::vector<double>> forced_bins(num_total_features, std::vector<double>());
if (forced_bins_path != "") {
std::ifstream forced_bins_stream(forced_bins_path.c_str());
if (forced_bins_stream.fail()) {
Log::Warning("Could not open %s. Will ignore.", forced_bins_path.c_str());
} else {
std::stringstream buffer;
buffer << forced_bins_stream.rdbuf();
std::string err;
Json forced_bins_json = Json::parse(buffer.str(), &err);
CHECK(forced_bins_json.is_array());
std::vector<Json> forced_bins_arr = forced_bins_json.array_items();
for (size_t i = 0; i < forced_bins_arr.size(); ++i) {
int feature_num = forced_bins_arr[i]["feature"].int_value();
CHECK_LT(feature_num, num_total_features);
if (categorical_features.count(feature_num)) {
Log::Warning("Feature %d is categorical. Will ignore forced bins for this feature.", feature_num);
} else {
std::vector<Json> bounds_arr = forced_bins_arr[i]["bin_upper_bound"].array_items();
for (size_t j = 0; j < bounds_arr.size(); ++j) {
forced_bins[feature_num].push_back(bounds_arr[j].number_value());
}
}
}
// remove duplicates
for (int i = 0; i < num_total_features; ++i) {
auto new_end = std::unique(forced_bins[i].begin(), forced_bins[i].end());
forced_bins[i].erase(new_end, forced_bins[i].end());
}
}
}
return forced_bins;
}
} // namespace LightGBM
 | 1 | 31,673 | The test cases are failing because `SetHeader` does not only handle cases where the input comes from files. It also reads categorical feature indices from the config parameters (see the part outside the `if (filename != nullptr) { ... }`). Skipping `SetHeader` directly here will cause errors when we load data from numpy or pandas arrays (where `filename == nullptr`) and use categorical features. So I think we should move the check `filename != nullptr && CheckCanLoadFromBin(filename) == ""` into `SetHeader`. That is, we change `if (filename != nullptr) { ... }` into `if (filename != nullptr && CheckCanLoadFromBin(filename) == "") { ... }` | microsoft-LightGBM | cpp
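A minimal sketch of the change this comment proposes, written as a standalone stand-in rather than the real `DatasetLoader::SetHeader` (the helper names below are assumptions made for illustration): the file-specific header parsing is skipped when a binary file will be loaded, while the config-driven handling of categorical feature indices still runs for every input source, including numpy/pandas data where `filename == nullptr`.

```cpp
#include <string>

// Hypothetical helpers standing in for the real SetHeader internals.
std::string CheckCanLoadFromBin(const char* filename);  // "" when no usable .bin file exists
void ParseHeaderFromTextFile(const char* filename);     // reads header/label/weight/group columns
void ParseCategoricalFeaturesFromConfig();               // reads categorical_feature from the config

void SetHeaderSketch(const char* filename) {
  // Only touch the text file when there is one and no binary file will be used.
  if (filename != nullptr && CheckCanLoadFromBin(filename) == "") {
    ParseHeaderFromTextFile(filename);
  }
  // Always read categorical feature indices from the config, so in-memory
  // inputs (filename == nullptr) still get them.
  ParseCategoricalFeaturesFromConfig();
}
```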
@@ -148,8 +148,7 @@ func (m *BackgroundProcessManager) StartProcess(cmd *ManagedProcess) (*process.P
go func() {
err := cmd.Wait()
if err != nil {
- err, ok := err.(*exec.ExitError)
- if ok {
+ if err, ok := err.(*exec.ExitError); ok {
status := err.Sys().(syscall.WaitStatus)
if status.Signaled() && status.Signal() == syscall.SIGTERM {
log.Info("process stopped with SIGTERM signal") | 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bpm
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"sync"
"syscall"
"github.com/shirou/gopsutil/process"
ctrl "sigs.k8s.io/controller-runtime"
)
var log = ctrl.Log.WithName("background-process-manager")
type NsType string
const (
MountNS NsType = "mnt"
// uts namespace is not supported yet
// UtsNS NsType = "uts"
IpcNS NsType = "ipc"
NetNS NsType = "net"
PidNS NsType = "pid"
// user namespace is not supported yet
// UserNS NsType = "user"
)
var nsArgMap = map[NsType]string{
MountNS: "m",
// uts namespace is not supported by nsexec yet
// UtsNS: "u",
IpcNS: "i",
NetNS: "n",
PidNS: "p",
// user namespace is not supported by nsexec yet
// UserNS: "U",
}
const (
pausePath = "/usr/local/bin/pause"
nsexecPath = "/usr/local/bin/nsexec"
DefaultProcPrefix = "/proc"
)
// ProcessPair is an identifier for process
type ProcessPair struct {
Pid int
CreateTime int64
}
// Stdio contains stdin, stdout and stderr
type Stdio struct {
sync.Locker
Stdin, Stdout, Stderr io.ReadWriteCloser
}
// BackgroundProcessManager manages all background processes
type BackgroundProcessManager struct {
deathSig *sync.Map
identifiers *sync.Map
stdio *sync.Map
}
// NewBackgroundProcessManager creates a background process manager
func NewBackgroundProcessManager() BackgroundProcessManager {
return BackgroundProcessManager{
deathSig: &sync.Map{},
identifiers: &sync.Map{},
stdio: &sync.Map{},
}
}
// StartProcess manages a process in manager
func (m *BackgroundProcessManager) StartProcess(cmd *ManagedProcess) (*process.Process, error) {
var identifierLock *sync.Mutex
if cmd.Identifier != nil {
lock, _ := m.identifiers.LoadOrStore(*cmd.Identifier, &sync.Mutex{})
identifierLock = lock.(*sync.Mutex)
identifierLock.Lock()
}
err := cmd.Start()
if err != nil {
log.Error(err, "fail to start process")
return nil, err
}
pid := cmd.Process.Pid
procState, err := process.NewProcess(int32(cmd.Process.Pid))
if err != nil {
return nil, err
}
ct, err := procState.CreateTime()
if err != nil {
return nil, err
}
pair := ProcessPair{
Pid: pid,
CreateTime: ct,
}
channel, _ := m.deathSig.LoadOrStore(pair, make(chan bool, 1))
deathChannel := channel.(chan bool)
stdio := &Stdio{Locker: &sync.Mutex{}}
if cmd.Stdin != nil {
if stdin, ok := cmd.Stdin.(io.ReadWriteCloser); ok {
stdio.Stdin = stdin
}
}
if cmd.Stdout != nil {
if stdout, ok := cmd.Stdout.(io.ReadWriteCloser); ok {
stdio.Stdout = stdout
}
}
if cmd.Stderr != nil {
if stderr, ok := cmd.Stderr.(io.ReadWriteCloser); ok {
stdio.Stderr = stderr
}
}
m.stdio.Store(pair, stdio)
log := log.WithValues("pid", pid)
go func() {
err := cmd.Wait()
if err != nil {
err, ok := err.(*exec.ExitError)
if ok {
status := err.Sys().(syscall.WaitStatus)
if status.Signaled() && status.Signal() == syscall.SIGTERM {
log.Info("process stopped with SIGTERM signal")
}
} else {
log.Error(err, "process exited accidentally")
}
}
log.Info("process stopped")
deathChannel <- true
m.deathSig.Delete(pair)
if io, loaded := m.stdio.LoadAndDelete(pair); loaded {
if stdio, ok := io.(*Stdio); ok {
stdio.Lock()
if stdio.Stdin != nil {
if err = stdio.Stdin.Close(); err != nil {
log.Error(err, "stdin fails to be closed")
}
}
if stdio.Stdout != nil {
if err = stdio.Stdout.Close(); err != nil {
log.Error(err, "stdout fails to be closed")
}
}
if stdio.Stderr != nil {
if err = stdio.Stderr.Close(); err != nil {
log.Error(err, "stderr fails to be closed")
}
}
stdio.Unlock()
}
}
if identifierLock != nil {
identifierLock.Unlock()
m.identifiers.Delete(*cmd.Identifier)
}
}()
return procState, nil
}
// KillBackgroundProcess sends SIGTERM to process
func (m *BackgroundProcessManager) KillBackgroundProcess(ctx context.Context, pid int, startTime int64) error {
log := log.WithValues("pid", pid)
p, err := os.FindProcess(int(pid))
if err != nil {
log.Error(err, "unreachable path. `os.FindProcess` will never return an error on unix")
return err
}
procState, err := process.NewProcess(int32(pid))
if err != nil {
// return successfully as the process has exited
return nil
}
ct, err := procState.CreateTime()
if err != nil {
log.Error(err, "fail to read create time")
// return successfully as the process has exited
return nil
}
// There is a bug in calculating CreateTime in the new version of
// gopsutil. This is a temporary solution before the upstream fixes it.
if startTime-ct > 1000 || ct-startTime > 1000 {
log.Info("process has already been killed", "startTime", ct, "expectedStartTime", startTime)
// return successfully as the process has exited
return nil
}
ppid, err := procState.Ppid()
if err != nil {
log.Error(err, "fail to read parent id")
// return successfully as the process has exited
return nil
}
if ppid != int32(os.Getpid()) {
log.Info("process has already been killed", "ppid", ppid)
// return successfully as the process has exited
return nil
}
err = p.Signal(syscall.SIGTERM)
if err != nil && err.Error() != "os: process already finished" {
log.Error(err, "error while killing process")
return err
}
pair := ProcessPair{
Pid: pid,
CreateTime: startTime,
}
channel, ok := m.deathSig.Load(pair)
if ok {
deathChannel := channel.(chan bool)
select {
case <-deathChannel:
case <-ctx.Done():
return ctx.Err()
}
}
log.Info("Successfully killed process")
return nil
}
func (m *BackgroundProcessManager) Stdio(pid int, startTime int64) *Stdio {
log := log.WithValues("pid", pid)
procState, err := process.NewProcess(int32(pid))
if err != nil {
log.Info("fail to get process information", "pid", pid)
// return successfully as the process has exited
return nil
}
ct, err := procState.CreateTime()
if err != nil {
log.Error(err, "fail to read create time")
// return successfully as the process has exited
return nil
}
// There is a bug in calculating CreateTime in the new version of
// gopsutil. This is a temporary solution before the upstream fixes it.
if startTime-ct > 1000 || ct-startTime > 1000 {
log.Info("process has exited", "startTime", ct, "expectedStartTime", startTime)
// return successfully as the process has exited
return nil
}
pair := ProcessPair{
Pid: pid,
CreateTime: startTime,
}
io, ok := m.stdio.Load(pair)
if !ok {
log.Info("fail to load with pair", "pair", pair)
// stdio is not stored
return nil
}
return io.(*Stdio)
}
// DefaultProcessBuilder returns the default process builder
func DefaultProcessBuilder(cmd string, args ...string) *ProcessBuilder {
return &ProcessBuilder{
cmd: cmd,
args: args,
nsOptions: []nsOption{},
pause: false,
identifier: nil,
ctx: context.Background(),
}
}
// ProcessBuilder builds an exec.Cmd for the daemon
type ProcessBuilder struct {
cmd string
args []string
env []string
nsOptions []nsOption
pause bool
localMnt bool
identifier *string
stdin io.ReadWriteCloser
stdout io.ReadWriteCloser
stderr io.ReadWriteCloser
ctx context.Context
}
// GetNsPath returns corresponding namespace path
func GetNsPath(pid uint32, typ NsType) string {
return fmt.Sprintf("%s/%d/ns/%s", DefaultProcPrefix, pid, string(typ))
}
// SetEnv sets the environment variables of the process
func (b *ProcessBuilder) SetEnv(key, value string) *ProcessBuilder {
b.env = append(b.env, fmt.Sprintf("%s=%s", key, value))
return b
}
// SetNS sets the namespace of the process
func (b *ProcessBuilder) SetNS(pid uint32, typ NsType) *ProcessBuilder {
return b.SetNSOpt([]nsOption{{
Typ: typ,
Path: GetNsPath(pid, typ),
}})
}
// SetNSOpt sets the namespace of the process
func (b *ProcessBuilder) SetNSOpt(options []nsOption) *ProcessBuilder {
b.nsOptions = append(b.nsOptions, options...)
return b
}
// SetIdentifier sets the identifier of the process
func (b *ProcessBuilder) SetIdentifier(id string) *ProcessBuilder {
b.identifier = &id
return b
}
// EnablePause enables pause for process
func (b *ProcessBuilder) EnablePause() *ProcessBuilder {
b.pause = true
return b
}
func (b *ProcessBuilder) EnableLocalMnt() *ProcessBuilder {
b.localMnt = true
return b
}
// SetContext sets context for process
func (b *ProcessBuilder) SetContext(ctx context.Context) *ProcessBuilder {
b.ctx = ctx
return b
}
// SetStdin sets stdin for process
func (b *ProcessBuilder) SetStdin(stdin io.ReadWriteCloser) *ProcessBuilder {
b.stdin = stdin
return b
}
// SetStdout sets stdout for process
func (b *ProcessBuilder) SetStdout(stdout io.ReadWriteCloser) *ProcessBuilder {
b.stdout = stdout
return b
}
// SetStderr sets stderr for process
func (b *ProcessBuilder) SetStderr(stderr io.ReadWriteCloser) *ProcessBuilder {
b.stderr = stderr
return b
}
type nsOption struct {
Typ NsType
Path string
}
// ManagedProcess is a process which can be managed by backgroundProcessManager
type ManagedProcess struct {
*exec.Cmd
// If the identifier is not nil, process manager should make sure no other
// process with this identifier is running when executing this command
Identifier *string
}
 | 1 | 24,399 | I think we should change the variable name of `err` in `if err, ok := err.(*exec.ExitError); ok`, since this re-declaration of `err` shadows the outer one and still affects the following `else` branch | chaos-mesh-chaos-mesh | go
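A small Go sketch of the rename this comment asks for, pulled out into a standalone helper (the helper name and logger callbacks are assumptions, not chaos-mesh code): naming the asserted value `exitErr` stops it from shadowing the outer `err`, so the `else` branch logs the real error from `cmd.Wait()` instead of the nil `*exec.ExitError` left by a failed assertion.

```go
package bpm

import (
	"os/exec"
	"syscall"
)

// handleWaitError mirrors the goroutine body in StartProcess with the
// suggested rename applied.
func handleWaitError(err error, logInfo func(msg string), logError func(err error, msg string)) {
	if err == nil {
		return
	}
	if exitErr, ok := err.(*exec.ExitError); ok {
		status := exitErr.Sys().(syscall.WaitStatus)
		if status.Signaled() && status.Signal() == syscall.SIGTERM {
			logInfo("process stopped with SIGTERM signal")
		}
	} else {
		// err here is still the original error returned by cmd.Wait().
		logError(err, "process exited accidentally")
	}
}
```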
@@ -145,6 +145,18 @@ folly::Future<StatusOr<std::vector<SpaceIdName>>> MetaClient::listSpaces() {
});
}
+
+folly::Future<StatusOr<bool>> MetaClient::dropSpace(std::string name) {
+ cpp2::DropSpaceReq req;
+ req.set_space_name(std::move(name));
+ return getResponse(std::move(req), [] (auto client, auto request) {
+ return client->future_dropSpace(request);
+ }, [] (cpp2::ExecResp&& resp) -> bool {
+ return resp.code == cpp2::ErrorCode::SUCCEEDED;
+ });
+}
+
+
folly::Future<StatusOr<bool>> MetaClient::addHosts(const std::vector<HostAddr>& hosts) {
std::vector<nebula::cpp2::HostAddr> thriftHosts;
thriftHosts.resize(hosts.size()); | 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "meta/client/MetaClient.h"
#include "network/NetworkUtils.h"
DEFINE_int32(load_data_interval_second, 2 * 60, "Load data interval, unit: second");
DEFINE_string(meta_server_addrs, "", "list of meta server addresses,"
"the format looks like ip1:port1, ip2:port2, ip3:port3");
DEFINE_int32(meta_client_io_threads, 3, "meta client io threads");
namespace nebula {
namespace meta {
MetaClient::MetaClient(std::shared_ptr<folly::IOThreadPoolExecutor> ioThreadPool,
std::vector<HostAddr> addrs)
: ioThreadPool_(ioThreadPool)
, addrs_(std::move(addrs)) {
if (ioThreadPool_ == nullptr) {
ioThreadPool_
= std::make_shared<folly::IOThreadPoolExecutor>(FLAGS_meta_client_io_threads);
}
if (addrs_.empty() && !FLAGS_meta_server_addrs.empty()) {
addrs_ = network::NetworkUtils::toHosts(FLAGS_meta_server_addrs);
}
CHECK(!addrs_.empty());
clientsMan_ = std::make_shared<thrift::ThriftClientManager<
meta::cpp2::MetaServiceAsyncClient>>();
updateActiveHost();
loadDataThreadFunc();
}
MetaClient::~MetaClient() {
loadDataThread_.stop();
loadDataThread_.wait();
VLOG(3) << "~MetaClient";
}
void MetaClient::init() {
CHECK(loadDataThread_.start());
size_t delayMS = FLAGS_load_data_interval_second * 1000 + folly::Random::rand32(900);
loadDataThread_.addTimerTask(delayMS,
FLAGS_load_data_interval_second * 1000,
&MetaClient::loadDataThreadFunc, this);
}
void MetaClient::loadDataThreadFunc() {
auto ret = listSpaces().get();
if (!ret.ok()) {
LOG(ERROR) << "List space failed!";
return;
}
decltype(localCache_) cache;
decltype(spaceIndexByName_) indexByName;
for (auto space : ret.value()) {
auto spaceId = space.first;
auto r = getPartsAlloc(spaceId).get();
if (!r.ok()) {
LOG(ERROR) << "Get parts allocaction failed for spaceId " << spaceId;
return;
}
auto spaceCache = std::make_shared<SpaceInfoCache>();
auto partsAlloc = r.value();
spaceCache->spaceName = space.second;
spaceCache->partsOnHost_ = reverse(partsAlloc);
spaceCache->partsAlloc_ = std::move(partsAlloc);
VLOG(3) << "Load space " << spaceId << ", parts num:" << spaceCache->partsAlloc_.size();
cache.emplace(spaceId, spaceCache);
indexByName.emplace(space.second, spaceId);
}
diff(cache);
{
folly::RWSpinLock::WriteHolder holder(localCacheLock_);
localCache_ = std::move(cache);
spaceIndexByName_ = std::move(indexByName);
}
LOG(INFO) << "Load data completed!";
}
std::unordered_map<HostAddr, std::vector<PartitionID>>
MetaClient::reverse(const PartsAlloc& parts) {
std::unordered_map<HostAddr, std::vector<PartitionID>> hosts;
for (auto& partHost : parts) {
for (auto& h : partHost.second) {
hosts[h].emplace_back(partHost.first);
}
}
return hosts;
}
template<typename Request,
typename RemoteFunc,
typename RespGenerator,
typename RpcResponse,
typename Response>
folly::Future<StatusOr<Response>> MetaClient::getResponse(
Request req,
RemoteFunc remoteFunc,
RespGenerator respGen) {
folly::Promise<StatusOr<Response>> pro;
auto f = pro.getFuture();
auto* evb = ioThreadPool_->getEventBase();
auto client = clientsMan_->client(active_, evb);
remoteFunc(client, std::move(req))
.then(evb, [p = std::move(pro), respGen, this] (folly::Try<RpcResponse>&& t) mutable {
// exception occurred during RPC
if (t.hasException()) {
p.setValue(Status::Error("RPC in MetaClient: %s", t.exception().what()));
return;
}
// errored
auto&& resp = t.value();
if (resp.code != cpp2::ErrorCode::SUCCEEDED) {
p.setValue(this->handleResponse(resp));
return;
}
// succeeded
p.setValue(respGen(std::move(resp)));
});
return f;
}
folly::Future<StatusOr<GraphSpaceID>>
MetaClient::createSpace(std::string name, int32_t partsNum, int32_t replicaFactor) {
cpp2::CreateSpaceReq req;
req.set_space_name(std::move(name));
req.set_parts_num(partsNum);
req.set_replica_factor(replicaFactor);
return getResponse(std::move(req), [] (auto client, auto request) {
return client->future_createSpace(request);
}, [] (cpp2::ExecResp&& resp) -> GraphSpaceID {
return resp.get_id().get_space_id();
});
}
folly::Future<StatusOr<std::vector<SpaceIdName>>> MetaClient::listSpaces() {
cpp2::ListSpacesReq req;
return getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listSpaces(request);
}, [this] (cpp2::ListSpacesResp&& resp) -> decltype(auto) {
return this->toSpaceIdName(resp.get_spaces());
});
}
folly::Future<StatusOr<bool>> MetaClient::addHosts(const std::vector<HostAddr>& hosts) {
std::vector<nebula::cpp2::HostAddr> thriftHosts;
thriftHosts.resize(hosts.size());
std::transform(hosts.begin(), hosts.end(), thriftHosts.begin(), [](const auto& h) {
nebula::cpp2::HostAddr th;
th.set_ip(h.first);
th.set_port(h.second);
return th;
});
cpp2::AddHostsReq req;
req.set_hosts(std::move(thriftHosts));
return getResponse(std::move(req), [] (auto client, auto request) {
return client->future_addHosts(request);
}, [] (cpp2::ExecResp&& resp) -> bool {
return resp.code == cpp2::ErrorCode::SUCCEEDED;
});
}
folly::Future<StatusOr<std::vector<HostAddr>>> MetaClient::listHosts() {
cpp2::ListHostsReq req;
return getResponse(std::move(req), [] (auto client, auto request) {
return client->future_listHosts(request);
}, [this] (cpp2::ListHostsResp&& resp) -> decltype(auto) {
return this->to(resp.hosts);
});
}
folly::Future<StatusOr<std::unordered_map<PartitionID, std::vector<HostAddr>>>>
MetaClient::getPartsAlloc(GraphSpaceID spaceId) {
cpp2::GetPartsAllocReq req;
req.set_space_id(spaceId);
return getResponse(std::move(req), [] (auto client, auto request) {
return client->future_getPartsAlloc(request);
}, [this] (cpp2::GetPartsAllocResp&& resp) -> decltype(auto) {
std::unordered_map<PartitionID, std::vector<HostAddr>> parts;
for (auto it = resp.parts.begin(); it != resp.parts.end(); it++) {
parts.emplace(it->first, to(it->second));
}
return parts;
});
}
StatusOr<GraphSpaceID>
MetaClient::getSpaceIdByNameFromCache(const std::string& name) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = spaceIndexByName_.find(name);
if (it != spaceIndexByName_.end()) {
return it->second;
}
return Status::SpaceNotFound();
}
std::vector<HostAddr> MetaClient::to(const std::vector<nebula::cpp2::HostAddr>& tHosts) {
std::vector<HostAddr> hosts;
hosts.resize(tHosts.size());
std::transform(tHosts.begin(), tHosts.end(), hosts.begin(), [](const auto& h) {
return HostAddr(h.get_ip(), h.get_port());
});
return hosts;
}
std::vector<SpaceIdName> MetaClient::toSpaceIdName(const std::vector<cpp2::IdName>& tIdNames) {
std::vector<SpaceIdName> idNames;
idNames.resize(tIdNames.size());
std::transform(tIdNames.begin(), tIdNames.end(), idNames.begin(), [] (const auto& tin) {
return SpaceIdName(tin.id.get_space_id(), tin.name);
});
return idNames;
}
template<typename RESP>
Status MetaClient::handleResponse(const RESP& resp) {
switch (resp.get_code()) {
case cpp2::ErrorCode::SUCCEEDED:
return Status::OK();
case cpp2::ErrorCode::E_SPACE_EXISTED:
return Status::Error("space existed!");
case cpp2::ErrorCode::E_LEADER_CHANGED:
return Status::Error("Leader changed!");
default:
return Status::Error("Unknown code %d", static_cast<int32_t>(resp.get_code()));
}
}
PartsMap MetaClient::doGetPartsMap(const HostAddr& host,
const std::unordered_map<
GraphSpaceID,
std::shared_ptr<SpaceInfoCache>>& localCache) {
PartsMap partMap;
for (auto it = localCache.begin(); it != localCache.end(); it++) {
auto spaceId = it->first;
auto& cache = it->second;
auto partsIt = cache->partsOnHost_.find(host);
if (partsIt != cache->partsOnHost_.end()) {
for (auto& partId : partsIt->second) {
auto partAllocIter = cache->partsAlloc_.find(partId);
CHECK(partAllocIter != cache->partsAlloc_.end());
auto& partM = partMap[spaceId][partId];
partM.spaceId_ = spaceId;
partM.partId_ = partId;
partM.peers_ = partAllocIter->second;
}
}
}
return partMap;
}
PartsMap MetaClient::getPartsMapFromCache(const HostAddr& host) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
return doGetPartsMap(host, localCache_);
}
PartMeta MetaClient::getPartMetaFromCache(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
CHECK(it != localCache_.end());
auto& cache = it->second;
auto partAllocIter = cache->partsAlloc_.find(partId);
CHECK(partAllocIter != cache->partsAlloc_.end());
PartMeta pm;
pm.spaceId_ = spaceId;
pm.partId_ = partId;
pm.peers_ = partAllocIter->second;
return pm;
}
bool MetaClient::checkPartExistInCache(const HostAddr& host,
GraphSpaceID spaceId,
PartitionID partId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
if (it != localCache_.end()) {
auto partsIt = it->second->partsOnHost_.find(host);
if (partsIt != it->second->partsOnHost_.end()) {
for (auto& pId : partsIt->second) {
if (pId == partId) {
return true;
}
}
}
}
return false;
}
bool MetaClient::checkSpaceExistInCache(const HostAddr& host,
GraphSpaceID spaceId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
if (it != localCache_.end()) {
auto partsIt = it->second->partsOnHost_.find(host);
if (partsIt != it->second->partsOnHost_.end() && !partsIt->second.empty()) {
return true;
}
}
return false;
}
int32_t MetaClient::partsNum(GraphSpaceID spaceId) {
folly::RWSpinLock::ReadHolder holder(localCacheLock_);
auto it = localCache_.find(spaceId);
CHECK(it != localCache_.end());
return it->second->partsAlloc_.size();
}
void MetaClient::diff(const std::unordered_map<GraphSpaceID,
std::shared_ptr<SpaceInfoCache>>& newCache) {
if (listener_ == nullptr) {
return;
}
auto localHost = listener_->getLocalHost();
auto newPartsMap = doGetPartsMap(localHost, newCache);
auto oldPartsMap = getPartsMapFromCache(localHost);
VLOG(1) << "Let's check if any new parts added/updated....";
for (auto it = newPartsMap.begin(); it != newPartsMap.end(); it++) {
auto spaceId = it->first;
const auto& newParts = it->second;
auto oldIt = oldPartsMap.find(spaceId);
if (oldIt == oldPartsMap.end()) {
VLOG(1) << "SpaceId " << spaceId << " was added!";
listener_->onSpaceAdded(spaceId);
for (auto partIt = newParts.begin(); partIt != newParts.end(); partIt++) {
listener_->onPartAdded(partIt->second);
}
} else {
const auto& oldParts = oldIt->second;
for (auto partIt = newParts.begin(); partIt != newParts.end(); partIt++) {
auto oldPartIt = oldParts.find(partIt->first);
if (oldPartIt == oldParts.end()) {
VLOG(1) << "SpaceId " << spaceId << ", partId "
<< partIt->first << " was added!";
listener_->onPartAdded(partIt->second);
} else {
const auto& oldPartMeta = oldPartIt->second;
const auto& newPartMeta = partIt->second;
if (oldPartMeta != newPartMeta) {
VLOG(1) << "SpaceId " << spaceId
<< ", partId " << partIt->first << " was updated!";
listener_->onPartUpdated(newPartMeta);
}
}
}
}
}
VLOG(1) << "Let's check if any old parts removed....";
for (auto it = oldPartsMap.begin(); it != oldPartsMap.end(); it++) {
auto spaceId = it->first;
const auto& oldParts = it->second;
auto newIt = newPartsMap.find(spaceId);
if (newIt == newPartsMap.end()) {
VLOG(1) << "SpaceId " << spaceId << " was removed!";
for (auto partIt = oldParts.begin(); partIt != oldParts.end(); partIt++) {
listener_->onPartRemoved(spaceId, partIt->first);
}
listener_->onSpaceRemoved(spaceId);
} else {
const auto& newParts = newIt->second;
for (auto partIt = oldParts.begin(); partIt != oldParts.end(); partIt++) {
auto newPartIt = newParts.find(partIt->first);
if (newPartIt == newParts.end()) {
VLOG(1) << "SpaceId " << spaceId
<< ", partId " << partIt->first << " was removed!";
listener_->onPartRemoved(spaceId, partIt->first);
}
}
}
}
}
} // namespace meta
} // namespace nebula
 | 1 | 15,478 | `Status` has the same semantics as `StatusOr<bool>` here. | vesoft-inc-nebula | cpp
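A short C++ sketch of the point behind this comment, assuming the usual `ok()`/`value()` accessors on nebula's `Status` and `StatusOr` (the include paths are guesses for illustration): when the only payload is success or failure, the `bool` inside `StatusOr<bool>` never tells the caller anything that `Status` does not already express, so the future could carry `Status` directly.

```cpp
#include "base/Status.h"    // include paths are assumptions of this sketch
#include "base/StatusOr.h"

using nebula::Status;
using nebula::StatusOr;

// Current shape: callers check ok() and then read a bool that is always true.
bool succeeded(const StatusOr<bool>& ret) {
  return ret.ok() && ret.value();  // value() adds no information beyond ok()
}

// Shape the comment hints at: Status alone already encodes OK vs. an error.
bool succeeded(const Status& ret) {
  return ret.ok();
}
```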
@@ -75,6 +75,7 @@ var allowedPaths = []string{
"/readyz",
"/mutate/acnp",
"/mutate/anp",
+ "/mutate/addlabels",
"/validate/tier",
"/validate/acnp",
"/validate/anp", | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"io/ioutil"
"net"
"os"
"path"
"time"
genericopenapi "k8s.io/apiserver/pkg/endpoints/openapi"
genericapiserver "k8s.io/apiserver/pkg/server"
genericoptions "k8s.io/apiserver/pkg/server/options"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"github.com/vmware-tanzu/antrea/pkg/apiserver"
"github.com/vmware-tanzu/antrea/pkg/apiserver/certificate"
"github.com/vmware-tanzu/antrea/pkg/apiserver/openapi"
"github.com/vmware-tanzu/antrea/pkg/apiserver/storage"
crdinformers "github.com/vmware-tanzu/antrea/pkg/client/informers/externalversions"
"github.com/vmware-tanzu/antrea/pkg/controller/metrics"
"github.com/vmware-tanzu/antrea/pkg/controller/networkpolicy"
"github.com/vmware-tanzu/antrea/pkg/controller/networkpolicy/store"
"github.com/vmware-tanzu/antrea/pkg/controller/querier"
"github.com/vmware-tanzu/antrea/pkg/controller/stats"
"github.com/vmware-tanzu/antrea/pkg/controller/traceflow"
"github.com/vmware-tanzu/antrea/pkg/features"
"github.com/vmware-tanzu/antrea/pkg/k8s"
"github.com/vmware-tanzu/antrea/pkg/log"
"github.com/vmware-tanzu/antrea/pkg/monitor"
"github.com/vmware-tanzu/antrea/pkg/signals"
"github.com/vmware-tanzu/antrea/pkg/util/cipher"
"github.com/vmware-tanzu/antrea/pkg/version"
)
const (
// informerDefaultResync is the default resync period if a handler doesn't specify one.
// Use the same default value as kube-controller-manager:
// https://github.com/kubernetes/kubernetes/blob/release-1.17/pkg/controller/apis/config/v1alpha1/defaults.go#L120
informerDefaultResync = 12 * time.Hour
// serverMinWatchTimeout determines the timeout allocated to watches from Antrea
// clients. Each watch will be allocated a random timeout between this value and twice this
// value, to help randomly distribute reconnections over time.
// This parameter corresponds to the MinRequestTimeout server config parameter in
// https://godoc.org/k8s.io/apiserver/pkg/server#Config.
// When the Antrea client re-creates a watch, all relevant NetworkPolicy objects need to be
// sent again by the controller. It may be a good idea to use a value which is larger than
// the kube-apiserver default (1800s). The K8s documentation states that clients should be
// able to handle watch timeouts gracefully but recommends using a large value in
// production.
serverMinWatchTimeout = 2 * time.Hour
)
var allowedPaths = []string{
"/healthz",
"/livez",
"/readyz",
"/mutate/acnp",
"/mutate/anp",
"/validate/tier",
"/validate/acnp",
"/validate/anp",
"/validate/clustergroup",
}
// run starts Antrea Controller with the given options and waits for termination signal.
func run(o *Options) error {
klog.Infof("Starting Antrea Controller (version %s)", version.GetFullVersion())
// Create K8s Clientset, Aggregator Clientset, CRD Clientset and SharedInformerFactory for the given config.
// Aggregator Clientset is used to update the CABundle of the APIServices backed by antrea-controller so that
// the aggregator can verify its serving certificate.
client, aggregatorClient, crdClient, err := k8s.CreateClients(o.config.ClientConnection, "")
if err != nil {
return fmt.Errorf("error creating K8s clients: %v", err)
}
informerFactory := informers.NewSharedInformerFactory(client, informerDefaultResync)
crdInformerFactory := crdinformers.NewSharedInformerFactory(crdClient, informerDefaultResync)
podInformer := informerFactory.Core().V1().Pods()
namespaceInformer := informerFactory.Core().V1().Namespaces()
serviceInformer := informerFactory.Core().V1().Services()
networkPolicyInformer := informerFactory.Networking().V1().NetworkPolicies()
nodeInformer := informerFactory.Core().V1().Nodes()
cnpInformer := crdInformerFactory.Security().V1alpha1().ClusterNetworkPolicies()
externalEntityInformer := crdInformerFactory.Core().V1alpha2().ExternalEntities()
anpInformer := crdInformerFactory.Security().V1alpha1().NetworkPolicies()
tierInformer := crdInformerFactory.Security().V1alpha1().Tiers()
traceflowInformer := crdInformerFactory.Ops().V1alpha1().Traceflows()
cgInformer := crdInformerFactory.Core().V1alpha2().ClusterGroups()
// Create Antrea object storage.
addressGroupStore := store.NewAddressGroupStore()
appliedToGroupStore := store.NewAppliedToGroupStore()
networkPolicyStore := store.NewNetworkPolicyStore()
groupStore := store.NewGroupStore()
networkPolicyController := networkpolicy.NewNetworkPolicyController(client,
crdClient,
podInformer,
namespaceInformer,
serviceInformer,
externalEntityInformer,
networkPolicyInformer,
cnpInformer,
anpInformer,
tierInformer,
cgInformer,
addressGroupStore,
appliedToGroupStore,
networkPolicyStore,
groupStore)
var networkPolicyStatusController *networkpolicy.StatusController
if features.DefaultFeatureGate.Enabled(features.AntreaPolicy) {
networkPolicyStatusController = networkpolicy.NewStatusController(crdClient, networkPolicyStore, cnpInformer, anpInformer)
}
endpointQuerier := networkpolicy.NewEndpointQuerier(networkPolicyController)
controllerQuerier := querier.NewControllerQuerier(networkPolicyController, o.config.APIPort)
controllerMonitor := monitor.NewControllerMonitor(crdClient, nodeInformer, controllerQuerier)
var traceflowController *traceflow.Controller
if features.DefaultFeatureGate.Enabled(features.Traceflow) {
traceflowController = traceflow.NewTraceflowController(crdClient, podInformer, traceflowInformer)
}
// statsAggregator takes stats summaries from antrea-agents, aggregates them, and serves the Stats APIs with the
// aggregated data. For now it's only used for NetworkPolicy stats.
var statsAggregator *stats.Aggregator
if features.DefaultFeatureGate.Enabled(features.NetworkPolicyStats) {
statsAggregator = stats.NewAggregator(networkPolicyInformer, cnpInformer, anpInformer)
}
cipherSuites, err := cipher.GenerateCipherSuitesList(o.config.TLSCipherSuites)
if err != nil {
return fmt.Errorf("error generating Cipher Suite list: %v", err)
}
apiServerConfig, err := createAPIServerConfig(o.config.ClientConnection.Kubeconfig,
client,
aggregatorClient,
o.config.SelfSignedCert,
o.config.APIPort,
addressGroupStore,
appliedToGroupStore,
networkPolicyStore,
groupStore,
controllerQuerier,
endpointQuerier,
networkPolicyController,
networkPolicyStatusController,
statsAggregator,
o.config.EnablePrometheusMetrics,
cipherSuites,
cipher.TLSVersionMap[o.config.TLSMinVersion])
if err != nil {
return fmt.Errorf("error creating API server config: %v", err)
}
apiServer, err := apiServerConfig.Complete(informerFactory).New()
if err != nil {
return fmt.Errorf("error creating API server: %v", err)
}
err = apiserver.CleanupDeprecatedAPIServices(aggregatorClient)
if err != nil {
return fmt.Errorf("failed to clean up the deprecated APIServices: %v", err)
}
// Set up signal capture: the first SIGTERM / SIGINT signal is handled gracefully and will
// cause the stopCh channel to be closed; if another signal is received before the program
// exits, we will force exit.
stopCh := signals.RegisterSignalHandlers()
log.StartLogFileNumberMonitor(stopCh)
informerFactory.Start(stopCh)
crdInformerFactory.Start(stopCh)
go controllerMonitor.Run(stopCh)
go networkPolicyController.Run(stopCh)
go apiServer.Run(stopCh)
if features.DefaultFeatureGate.Enabled(features.NetworkPolicyStats) {
go statsAggregator.Run(stopCh)
}
if o.config.EnablePrometheusMetrics {
metrics.InitializePrometheusMetrics()
}
if features.DefaultFeatureGate.Enabled(features.Traceflow) {
go traceflowController.Run(stopCh)
}
if features.DefaultFeatureGate.Enabled(features.AntreaPolicy) {
go networkPolicyStatusController.Run(stopCh)
}
<-stopCh
klog.Info("Stopping Antrea controller")
return nil
}
func createAPIServerConfig(kubeconfig string,
client clientset.Interface,
aggregatorClient aggregatorclientset.Interface,
selfSignedCert bool,
bindPort int,
addressGroupStore storage.Interface,
appliedToGroupStore storage.Interface,
networkPolicyStore storage.Interface,
groupStore storage.Interface,
controllerQuerier querier.ControllerQuerier,
endpointQuerier networkpolicy.EndpointQuerier,
npController *networkpolicy.NetworkPolicyController,
networkPolicyStatusController *networkpolicy.StatusController,
statsAggregator *stats.Aggregator,
enableMetrics bool,
cipherSuites []uint16,
tlsMinVersion uint16) (*apiserver.Config, error) {
secureServing := genericoptions.NewSecureServingOptions().WithLoopback()
authentication := genericoptions.NewDelegatingAuthenticationOptions()
authorization := genericoptions.NewDelegatingAuthorizationOptions().WithAlwaysAllowPaths(allowedPaths...)
caCertController, err := certificate.ApplyServerCert(selfSignedCert, client, aggregatorClient, secureServing)
if err != nil {
return nil, fmt.Errorf("error applying server cert: %v", err)
}
secureServing.BindPort = bindPort
secureServing.BindAddress = net.ParseIP("0.0.0.0")
// kubeconfig file is useful when antrea-controller isn't running as a pod, like during development.
if len(kubeconfig) > 0 {
authentication.RemoteKubeConfigFile = kubeconfig
authorization.RemoteKubeConfigFile = kubeconfig
}
serverConfig := genericapiserver.NewConfig(apiserver.Codecs)
if err := secureServing.ApplyTo(&serverConfig.SecureServing, &serverConfig.LoopbackClientConfig); err != nil {
return nil, err
}
if err := authentication.ApplyTo(&serverConfig.Authentication, serverConfig.SecureServing, nil); err != nil {
return nil, err
}
if err := authorization.ApplyTo(&serverConfig.Authorization); err != nil {
return nil, err
}
if err := os.MkdirAll(path.Dir(apiserver.TokenPath), os.ModeDir); err != nil {
return nil, fmt.Errorf("error when creating dirs of token file: %v", err)
}
if err := ioutil.WriteFile(apiserver.TokenPath, []byte(serverConfig.LoopbackClientConfig.BearerToken), 0600); err != nil {
return nil, fmt.Errorf("error when writing loopback access token to file: %v", err)
}
serverConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(
openapi.GetOpenAPIDefinitions,
genericopenapi.NewDefinitionNamer(apiserver.Scheme))
serverConfig.OpenAPIConfig.Info.Title = "Antrea"
serverConfig.EnableMetrics = enableMetrics
serverConfig.MinRequestTimeout = int(serverMinWatchTimeout.Seconds())
serverConfig.SecureServing.CipherSuites = cipherSuites
serverConfig.SecureServing.MinTLSVersion = tlsMinVersion
return apiserver.NewConfig(
serverConfig,
addressGroupStore,
appliedToGroupStore,
networkPolicyStore,
groupStore,
caCertController,
statsAggregator,
controllerQuerier,
networkPolicyStatusController,
endpointQuerier,
npController), nil
}
| 1 | 30,515 | why is this named in a different style from other paths? | antrea-io-antrea | go |
@@ -73,13 +73,13 @@ func (r *AWSClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reter
return reconcile.Result{}, err
}
- if isPaused(cluster, awsCluster) {
- log.Info("AWSCluster or linked Cluster is marked as paused. Won't reconcile")
+ if cluster == nil {
+ log.Info("Cluster Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}
- if cluster == nil {
- log.Info("Cluster Controller has not yet set OwnerRef")
+ if util.IsPaused(cluster, awsCluster) {
+ log.Info("AWSCluster or linked Cluster is marked as paused. Won't reconcile")
return reconcile.Result{}, nil
}
| 1 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"net"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/elb"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// AWSClusterReconciler reconciles an AWSCluster object
type AWSClusterReconciler struct {
client.Client
Recorder record.EventRecorder
Log logr.Logger
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclusters/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
func (r *AWSClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.TODO()
log := r.Log.WithValues("namespace", req.Namespace, "awsCluster", req.Name)
// Fetch the AWSCluster instance
awsCluster := &infrav1.AWSCluster{}
err := r.Get(ctx, req.NamespacedName, awsCluster)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the Cluster.
cluster, err := util.GetOwnerCluster(ctx, r.Client, awsCluster.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if isPaused(cluster, awsCluster) {
log.Info("AWSCluster or linked Cluster is marked as paused. Won't reconcile")
return reconcile.Result{}, nil
}
if cluster == nil {
log.Info("Cluster Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}
log = log.WithValues("cluster", cluster.Name)
// Create the scope.
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: r.Client,
Logger: log,
Cluster: cluster,
AWSCluster: awsCluster,
})
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
// Always close the scope when exiting this function so we can persist any AWSCluster changes.
defer func() {
if err := clusterScope.Close(); err != nil && reterr == nil {
reterr = err
}
}()
// Handle deleted clusters
if !awsCluster.DeletionTimestamp.IsZero() {
return reconcileDelete(clusterScope)
}
// Handle non-deleted clusters
return reconcileNormal(clusterScope)
}
// TODO(ncdc): should this be a function on ClusterScope?
func reconcileDelete(clusterScope *scope.ClusterScope) (reconcile.Result, error) {
clusterScope.Info("Reconciling AWSCluster delete")
ec2svc := ec2.NewService(clusterScope)
elbsvc := elb.NewService(clusterScope)
awsCluster := clusterScope.AWSCluster
if err := elbsvc.DeleteLoadbalancers(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "error deleting load balancer for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name)
}
if err := ec2svc.DeleteBastion(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "error deleting bastion for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name)
}
if err := ec2svc.DeleteNetwork(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "error deleting network for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name)
}
// Cluster is deleted so remove the finalizer.
controllerutil.RemoveFinalizer(clusterScope.AWSCluster, infrav1.ClusterFinalizer)
return reconcile.Result{}, nil
}
// TODO(ncdc): should this be a function on ClusterScope?
func reconcileNormal(clusterScope *scope.ClusterScope) (reconcile.Result, error) {
clusterScope.Info("Reconciling AWSCluster")
awsCluster := clusterScope.AWSCluster
// If the AWSCluster doesn't have our finalizer, add it.
controllerutil.AddFinalizer(awsCluster, infrav1.ClusterFinalizer)
// Register the finalizer immediately to avoid orphaning AWS resources on delete
if err := clusterScope.PatchObject(); err != nil {
return reconcile.Result{}, err
}
ec2Service := ec2.NewService(clusterScope)
elbService := elb.NewService(clusterScope)
if err := ec2Service.ReconcileNetwork(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile network for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name)
}
if err := ec2Service.ReconcileBastion(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile bastion host for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name)
}
if err := elbService.ReconcileLoadbalancers(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile load balancers for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name)
}
if awsCluster.Status.Network.APIServerELB.DNSName == "" {
clusterScope.Info("Waiting on API server ELB DNS name")
return reconcile.Result{RequeueAfter: 15 * time.Second}, nil
}
if _, err := net.LookupIP(awsCluster.Status.Network.APIServerELB.DNSName); err != nil {
clusterScope.Info("Waiting on API server ELB DNS name to resolve")
return reconcile.Result{RequeueAfter: 15 * time.Second}, nil
}
awsCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
Host: awsCluster.Status.Network.APIServerELB.DNSName,
Port: clusterScope.APIServerPort(),
}
for _, subnet := range clusterScope.Subnets().FilterPrivate() {
found := false
for _, az := range awsCluster.Status.Network.APIServerELB.AvailabilityZones {
if az == subnet.AvailabilityZone {
found = true
break
}
}
clusterScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomainSpec{
ControlPlane: found,
})
}
awsCluster.Status.Ready = true
return reconcile.Result{}, nil
}
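// SetupWithManager registers the AWSCluster reconciler with the manager, filtering out paused objects
// and watching Cluster resources so that unpausing a Cluster requeues the AWSCluster it references.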
func (r *AWSClusterReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
controller, err := ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.AWSCluster{}).
WithEventFilter(pausedPredicates(r.Log)).
Build(r)
if err != nil {
return errors.Wrap(err, "error creating controller")
}
return controller.Watch(
&source.Kind{Type: &clusterv1.Cluster{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: handler.ToRequestsFunc(r.requeueAWSClusterForUnpausedCluster),
},
predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
oldCluster := e.ObjectOld.(*clusterv1.Cluster)
newCluster := e.ObjectNew.(*clusterv1.Cluster)
log := r.Log.WithValues("predicate", "updateEvent", "namespace", newCluster.Namespace, "cluster", newCluster.Name)
switch {
// return true if Cluster.Spec.Paused has changed from true to false
case oldCluster.Spec.Paused && !newCluster.Spec.Paused:
log.V(4).Info("Cluster was unpaused, will attempt to map associated AWSCluster.")
return true
// otherwise, return false
default:
log.V(4).Info("Cluster did not match expected conditions, will not attempt to map associated AWSCluster.")
return false
}
},
CreateFunc: func(e event.CreateEvent) bool {
cluster := e.Object.(*clusterv1.Cluster)
log := r.Log.WithValues("predicate", "createEvent", "namespace", cluster.Namespace, "cluster", cluster.Name)
// Only need to trigger a reconcile if the Cluster.Spec.Paused is false
if !cluster.Spec.Paused {
log.V(4).Info("Cluster is not paused, will attempt to map associated AWSCluster.")
return true
}
log.V(4).Info("Cluster did not match expected conditions, will not attempt to map associated AWSCluster.")
return false
},
DeleteFunc: func(e event.DeleteEvent) bool {
log := r.Log.WithValues("predicate", "deleteEvent", "namespace", e.Meta.GetNamespace(), "cluster", e.Meta.GetName())
log.V(4).Info("Cluster did not match expected conditions, will not attempt to map associated AWSCluster.")
return false
},
GenericFunc: func(e event.GenericEvent) bool {
log := r.Log.WithValues("predicate", "genericEvent", "namespace", e.Meta.GetNamespace(), "cluster", e.Meta.GetName())
log.V(4).Info("Cluster did not match expected conditions, will not attempt to map associated AWSCluster.")
return false
},
},
)
}
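// requeueAWSClusterForUnpausedCluster maps a Cluster event to a reconcile request for the AWSCluster
// referenced by its InfrastructureRef, skipping deleted Clusters and refs to other infrastructure kinds.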
func (r *AWSClusterReconciler) requeueAWSClusterForUnpausedCluster(o handler.MapObject) []ctrl.Request {
c := o.Object.(*clusterv1.Cluster)
log := r.Log.WithValues("objectMapper", "clusterToAWSCluster", "namespace", c.Namespace, "cluster", c.Name)
// Don't handle deleted clusters
if !c.ObjectMeta.DeletionTimestamp.IsZero() {
log.V(4).Info("Cluster has a deletion timestamp, skipping mapping.")
return nil
}
// Make sure the ref is set
if c.Spec.InfrastructureRef == nil {
log.V(4).Info("Cluster does not have an InfrastructureRef, skipping mapping.")
return nil
}
if c.Spec.InfrastructureRef.GroupVersionKind().Kind != "AWSCluster" {
log.V(4).Info("Cluster has an InfrastructureRef for a different type, skipping mapping.")
return nil
}
log.V(4).Info("Adding request.", "awsCluster", c.Spec.InfrastructureRef.Name)
return []ctrl.Request{
{
NamespacedName: client.ObjectKey{Namespace: c.Namespace, Name: c.Spec.InfrastructureRef.Name},
},
}
}
| 1 | 14,505 | Swapped these to ensure that we aren't passing a nil cluster in to util.IsPaused | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -25,13 +25,18 @@ import (
api "google.golang.org/api/compute/v1"
)
+type platformPkgManagerTuple struct {
+ platform string
+ pkgManager string
+}
+
const (
packageInstalledString = "package is installed"
packageNotInstalledString = "package is not installed"
)
var (
- pkgManagers = []string{"apt", "yum"}
+ tuples = []platformPkgManagerTuple{{"debian", "apt"}, {"centos", "yum"}, {"rhel", "yum"}}
)
// vf is the the vertificationFunction that is used in each testSetup during assertion of test case. | 1 | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package packagemanagement
import (
"fmt"
"path"
"time"
osconfigpb "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/_internal/gapi-cloud-osconfig-go/google.golang.org/genproto/googleapis/cloud/osconfig/v1alpha1"
"github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/compute"
osconfigserver "github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/osconfig_server"
api "google.golang.org/api/compute/v1"
)
const (
packageInstalledString = "package is installed"
packageNotInstalledString = "package is not installed"
)
var (
pkgManagers = []string{"apt", "yum"}
)
// vf is the verification function that is used in each testSetup during assertion of a test case.
var vf = func(inst *compute.Instance, vfString string, port int64, interval, timeout time.Duration) error {
return inst.WaitForSerialOutput(vfString, port, interval, timeout)
}
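// addCreateOsConfigTest appends, for each package manager, a setup that only exercises OsConfig creation;
// no assignment or startup script is attached.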
func addCreateOsConfigTest(pkgTestSetup []*packageManagementTestSetup) []*packageManagementTestSetup {
testName := "createosconfigtest"
desc := "test osconfig creation"
packageName := "cowsay"
for _, pkgManager := range pkgManagers {
var oc *osconfigpb.OsConfig
var image string
switch pkgManager {
case "apt":
image = debianImage
pkgs := []*osconfigpb.Package{osconfigserver.BuildPackage(packageName)}
oc = osconfigserver.BuildOsConfig(fmt.Sprintf("%s-%s", path.Base(image), testName), desc, osconfigserver.BuildAptPackageConfig(pkgs, nil, nil), nil, nil, nil, nil)
case "yum":
image = centosImage
pkgs := []*osconfigpb.Package{osconfigserver.BuildPackage(packageName)}
oc = osconfigserver.BuildOsConfig(fmt.Sprintf("%s-%s", path.Base(image), testName), desc, nil, osconfigserver.BuildYumPackageConfig(pkgs, nil, nil), nil, nil, nil)
default:
panic(fmt.Sprintf("non existent package manager: %s", pkgManager))
}
setup := packageManagementTestSetup{
image: image,
name: fmt.Sprintf("%s-%s", path.Base(image), testName),
osconfig: oc,
assignment: nil,
fname: testName,
vf: vf,
}
pkgTestSetup = append(pkgTestSetup, &setup)
}
return pkgTestSetup
}
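// addPackageInstallTest appends, for each package manager, a setup that assigns an OsConfig installing the
// package and asserts via serial output that it ends up installed.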
func addPackageInstallTest(pkgTestSetup []*packageManagementTestSetup) []*packageManagementTestSetup {
testName := "packageinstalltest"
desc := "test package installation"
packageName := "cowsay"
for _, pkgManager := range pkgManagers {
var oc *osconfigpb.OsConfig
var image, vs string
switch pkgManager {
case "apt":
image = debianImage
pkgs := []*osconfigpb.Package{osconfigserver.BuildPackage(packageName)}
oc = osconfigserver.BuildOsConfig(testName, desc, osconfigserver.BuildAptPackageConfig(pkgs, nil, nil), nil, nil, nil, nil)
			vs = packageInstalledString
case "yum":
image = centosImage
pkgs := []*osconfigpb.Package{osconfigserver.BuildPackage(packageName)}
oc = osconfigserver.BuildOsConfig(testName, desc, nil, osconfigserver.BuildYumPackageConfig(pkgs, nil, nil), nil, nil, nil)
			vs = packageInstalledString
default:
panic(fmt.Sprintf("non existent package manager: %s", pkgManager))
}
instanceName := fmt.Sprintf("%s-%s", path.Base(image), testName)
assign := osconfigserver.BuildAssignment(testName, desc, osconfigserver.BuildInstanceFilterExpression(instanceName), []string{fmt.Sprintf("projects/%s/osConfigs/%s", testProjectID, oc.Name)})
ss := getPackageInstallStartupScript(pkgManager, packageName)
setup := packageManagementTestSetup{
image: image,
name: instanceName,
osconfig: oc,
assignment: assign,
fname: testName,
vf: vf,
vstring: vs,
startup: &api.MetadataItems{
Key: "startup-script",
Value: &ss,
},
}
pkgTestSetup = append(pkgTestSetup, &setup)
}
return pkgTestSetup
}
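// addPackageRemovalTest appends, for each package manager, a setup that assigns an OsConfig removing the
// package and asserts via serial output that it is not installed.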
func addPackageRemovalTest(pkgTestSetup []*packageManagementTestSetup) []*packageManagementTestSetup {
testName := "packageremovaltest"
desc := "test package removal"
packageName := "cowsay"
for _, pkgManager := range pkgManagers {
var oc *osconfigpb.OsConfig
var image, vs string
switch pkgManager {
case "apt":
image = debianImage
pkgs := []*osconfigpb.Package{osconfigserver.BuildPackage(packageName)}
oc = osconfigserver.BuildOsConfig(testName, desc, osconfigserver.BuildAptPackageConfig(nil, pkgs, nil), nil, nil, nil, nil)
			vs = packageNotInstalledString
case "yum":
image = centosImage
removePkg := []*osconfigpb.Package{osconfigserver.BuildPackage(packageName)}
oc = osconfigserver.BuildOsConfig(testName, desc, nil, osconfigserver.BuildYumPackageConfig(nil, removePkg, nil), nil, nil, nil)
			vs = packageNotInstalledString
default:
panic(fmt.Sprintf("non existent package manager: %s", pkgManager))
}
instanceName := fmt.Sprintf("%s-%s", path.Base(image), testName)
assign := osconfigserver.BuildAssignment(testName, desc, osconfigserver.BuildInstanceFilterExpression(instanceName), []string{fmt.Sprintf("projects/%s/osConfigs/%s", testProjectID, oc.Name)})
ss := getPackageRemovalStartupScript(pkgManager, packageName)
setup := packageManagementTestSetup{
image: image,
name: instanceName,
osconfig: oc,
assignment: assign,
fname: testName,
vf: vf,
vstring: vs,
startup: &api.MetadataItems{
Key: "startup-script",
Value: &ss,
},
}
pkgTestSetup = append(pkgTestSetup, &setup)
}
return pkgTestSetup
}
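// addPackageInstallRemovalTest appends, for each package manager, a setup that lists the same package for
// both install and removal and asserts that removal takes precedence.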
func addPackageInstallRemovalTest(pkgTestSetup []*packageManagementTestSetup) []*packageManagementTestSetup {
testName := "packageinstallremovaltest"
desc := "test package removal takes precedence over package installation"
packageName := "cowsay"
for _, pkgManager := range pkgManagers {
var oc *osconfigpb.OsConfig
var image, vs string
switch pkgManager {
case "apt":
image = debianImage
installPkg := []*osconfigpb.Package{osconfigserver.BuildPackage(packageName)}
removePkg := []*osconfigpb.Package{osconfigserver.BuildPackage(packageName)}
oc = osconfigserver.BuildOsConfig(testName, desc, osconfigserver.BuildAptPackageConfig(installPkg, removePkg, nil), nil, nil, nil, nil)
			vs = packageNotInstalledString
case "yum":
image = centosImage
installPkg := []*osconfigpb.Package{osconfigserver.BuildPackage(packageName)}
removePkg := []*osconfigpb.Package{osconfigserver.BuildPackage(packageName)}
			oc = osconfigserver.BuildOsConfig(testName, desc, nil, osconfigserver.BuildYumPackageConfig(installPkg, removePkg, nil), nil, nil, nil)
			vs = packageNotInstalledString
default:
panic(fmt.Sprintf("non existent package manager: %s", pkgManager))
}
instanceName := fmt.Sprintf("%s-%s", path.Base(image), testName)
assign := osconfigserver.BuildAssignment(testName, desc, osconfigserver.BuildInstanceFilterExpression(instanceName), []string{fmt.Sprintf("projects/%s/osConfigs/%s", testProjectID, oc.Name)})
ss := getPackageInstallRemovalStartupScript(pkgManager, packageName)
setup := packageManagementTestSetup{
image: image,
name: instanceName,
osconfig: oc,
assignment: assign,
fname: testName,
vf: vf,
vstring: vs,
startup: &api.MetadataItems{
Key: "startup-script",
Value: &ss,
},
}
pkgTestSetup = append(pkgTestSetup, &setup)
}
return pkgTestSetup
}
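// generateAllTestSetup builds the complete list of package management test setups to run.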
func generateAllTestSetup() []*packageManagementTestSetup {
pkgTestSetup := []*packageManagementTestSetup{}
pkgTestSetup = addCreateOsConfigTest(pkgTestSetup)
pkgTestSetup = addPackageInstallTest(pkgTestSetup)
pkgTestSetup = addPackageRemovalTest(pkgTestSetup)
pkgTestSetup = addPackageInstallRemovalTest(pkgTestSetup)
return pkgTestSetup
}
| 1 | 8,337 | I don't see pkgManager used anywhere, is there a reason we need this? | GoogleCloudPlatform-compute-image-tools | go |
@@ -182,7 +182,7 @@ public class CoreDescriptor {
*/
public CoreDescriptor(String name, Path instanceDir, Map<String, String> coreProps,
Properties containerProperties, ZkController zkController) {
- this.instanceDir = instanceDir;
+ this.instanceDir = instanceDir.toAbsolutePath();
originalCoreProperties.setProperty(CORE_NAME, name);
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.core;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.invoke.MethodHandles;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.commons.lang3.StringUtils;
import org.apache.solr.cloud.CloudDescriptor;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.SolrException;
import org.apache.solr.util.PropertiesUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Metadata about a {@link SolrCore}.
* It's mostly loaded from a file on disk at the very beginning of loading a core.
*
* It's mostly but not completely immutable; we should fix this!
*
* @since solr 1.3
*/
public class CoreDescriptor {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
// Properties file name constants
public static final String CORE_NAME = "name";
public static final String CORE_CONFIG = "config";
public static final String CORE_DATADIR = "dataDir";
public static final String CORE_ULOGDIR = "ulogDir";
public static final String CORE_SCHEMA = "schema";
public static final String CORE_SHARD = "shard";
public static final String CORE_COLLECTION = "collection";
public static final String CORE_ROLES = "roles";
public static final String CORE_PROPERTIES = "properties";
public static final String CORE_LOADONSTARTUP = "loadOnStartup";
public static final String CORE_TRANSIENT = "transient";
public static final String CORE_NODE_NAME = "coreNodeName";
public static final String CORE_CONFIGSET = "configSet";
public static final String CORE_CONFIGSET_PROPERTIES = "configSetProperties";
public static final String SOLR_CORE_PROP_PREFIX = "solr.core.";
public static final String DEFAULT_EXTERNAL_PROPERTIES_FILE = "conf" + File.separator + "solrcore.properties";
/**
* Whether this core was configured using a configSet that was trusted.
* This helps in avoiding the loading of plugins that have potential
* vulnerabilities, when the configSet was not uploaded from a trusted
* user.
*/
private boolean trustedConfigSet = true;
/**
* Get the standard properties in persistable form
* @return the standard core properties in persistable form
*/
public Properties getPersistableStandardProperties() {
return originalCoreProperties;
}
/**
* Get user-defined core properties in persistable form
* @return user-defined core properties in persistable form
*/
public Properties getPersistableUserProperties() {
return originalExtraProperties;
}
private static ImmutableMap<String, String> defaultProperties = new ImmutableMap.Builder<String, String>()
.put(CORE_CONFIG, "solrconfig.xml")
.put(CORE_SCHEMA, "schema.xml")
.put(CORE_CONFIGSET_PROPERTIES, ConfigSetProperties.DEFAULT_FILENAME)
.put(CORE_DATADIR, "data" + File.separator)
.put(CORE_TRANSIENT, "false")
.put(CORE_LOADONSTARTUP, "true")
.build();
private static ImmutableList<String> requiredProperties = ImmutableList.of(
CORE_NAME
);
public static ImmutableList<String> standardPropNames = ImmutableList.of(
CORE_NAME,
CORE_CONFIG,
CORE_DATADIR,
CORE_ULOGDIR,
CORE_SCHEMA,
CORE_PROPERTIES,
CORE_CONFIGSET_PROPERTIES,
CORE_LOADONSTARTUP,
CORE_TRANSIENT,
CORE_CONFIGSET,
// cloud props
CORE_SHARD,
CORE_COLLECTION,
CORE_ROLES,
CORE_NODE_NAME,
CloudDescriptor.NUM_SHARDS
);
private final CloudDescriptor cloudDesc;
private final Path instanceDir;
/** The original standard core properties, before substitution */
protected final Properties originalCoreProperties = new Properties();
/** The original extra core properties, before substitution */
protected final Properties originalExtraProperties = new Properties();
/** The properties for this core, as available through getProperty() */
protected final Properties coreProperties = new Properties();
/** The properties for this core, substitutable by resource loaders */
protected final Properties substitutableProperties = new Properties();
/** TESTS ONLY */
public CoreDescriptor(String name, Path instanceDir, CoreContainer coreContainer, String... coreProps) {
this(name, instanceDir, toMap(coreProps), coreContainer.getContainerProperties(), coreContainer.getZkController());
}
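  /** Builds a property map from an even-length array of alternating keys and values (test helper). */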
private static Map<String, String> toMap(String... properties) {
Map<String, String> props = new HashMap<>();
assert properties.length % 2 == 0;
for (int i = 0; i < properties.length; i += 2) {
props.put(properties[i], properties[i+1]);
}
return props;
}
/**
* Create a new CoreDescriptor using the properties of an existing one
* @param coreName the new CoreDescriptor's name
* @param other the CoreDescriptor to copy
*/
public CoreDescriptor(String coreName, CoreDescriptor other) {
this.cloudDesc = other.cloudDesc;
this.instanceDir = other.instanceDir;
this.originalExtraProperties.putAll(other.originalExtraProperties);
this.originalCoreProperties.putAll(other.originalCoreProperties);
this.coreProperties.putAll(other.coreProperties);
this.substitutableProperties.putAll(other.substitutableProperties);
this.coreProperties.setProperty(CORE_NAME, coreName);
this.originalCoreProperties.setProperty(CORE_NAME, coreName);
this.substitutableProperties.setProperty(SOLR_CORE_PROP_PREFIX + CORE_NAME, coreName);
this.trustedConfigSet = other.trustedConfigSet;
}
/**
* Create a new CoreDescriptor.
* @param name the CoreDescriptor's name
* @param instanceDir a Path resolving to the instanceDir
* @param coreProps a Map of the properties for this core
* @param containerProperties the properties from the enclosing container.
* @param zkController the ZkController in SolrCloud mode, otherwise null.
*/
public CoreDescriptor(String name, Path instanceDir, Map<String, String> coreProps,
Properties containerProperties, ZkController zkController) {
this.instanceDir = instanceDir;
originalCoreProperties.setProperty(CORE_NAME, name);
name = PropertiesUtil.substituteProperty(checkPropertyIsNotEmpty(name, CORE_NAME),
containerProperties);
coreProperties.putAll(defaultProperties);
coreProperties.put(CORE_NAME, name);
for (Map.Entry<String, String> entry : coreProps.entrySet()) {
String propname = entry.getKey();
String propvalue = entry.getValue();
if (isUserDefinedProperty(propname))
originalExtraProperties.put(propname, propvalue);
else
originalCoreProperties.put(propname, propvalue);
if (!requiredProperties.contains(propname)) // Required props are already dealt with
coreProperties.setProperty(propname,
PropertiesUtil.substituteProperty(propvalue, containerProperties));
}
loadExtraProperties();
buildSubstitutableProperties();
// TODO maybe make this a CloudCoreDescriptor subclass?
if (zkController != null) {
cloudDesc = new CloudDescriptor(this, name, coreProperties);
} else {
cloudDesc = null;
}
log.debug("Created CoreDescriptor: {}", coreProperties);
}
/**
* Load properties specified in an external properties file.
*
* The file to load can be specified in a {@code properties} property on
* the original Properties object used to create this CoreDescriptor. If
* this has not been set, then we look for {@code conf/solrcore.properties}
* underneath the instance dir.
*
* File paths are taken as read from the core's instance directory
* if they are not absolute.
*/
protected void loadExtraProperties() {
String filename = coreProperties.getProperty(CORE_PROPERTIES, DEFAULT_EXTERNAL_PROPERTIES_FILE);
Path propertiesFile = instanceDir.resolve(filename);
if (Files.exists(propertiesFile)) {
try (InputStream is = Files.newInputStream(propertiesFile)) {
Properties externalProps = new Properties();
externalProps.load(new InputStreamReader(is, StandardCharsets.UTF_8));
coreProperties.putAll(externalProps);
} catch (IOException e) {
String message = String.format(Locale.ROOT, "Could not load properties from %s: %s:",
propertiesFile.toString(), e.toString());
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, message);
}
}
}
/**
* Create the properties object used by resource loaders, etc, for property
* substitution. The default solr properties are prefixed with 'solr.core.', so,
* e.g., 'name' becomes 'solr.core.name'
*/
protected void buildSubstitutableProperties() {
for (String propName : coreProperties.stringPropertyNames()) {
String propValue = coreProperties.getProperty(propName);
if (!isUserDefinedProperty(propName))
propName = SOLR_CORE_PROP_PREFIX + propName;
substitutableProperties.setProperty(propName, propValue);
}
substitutableProperties.setProperty("solr.core.instanceDir", instanceDir.toAbsolutePath().toString());
}
/**
* Is this property a Solr-standard property, or is it an extra property
* defined per-core by the user?
* @param propName the Property name
* @return {@code true} if this property is user-defined
*/
protected static boolean isUserDefinedProperty(String propName) {
return !standardPropNames.contains(propName);
}
public static String checkPropertyIsNotEmpty(String value, String propName) {
if (StringUtils.isEmpty(value)) {
String message = String.format(Locale.ROOT, "Cannot create core with empty %s value", propName);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, message);
}
return value;
}
public String getPropertiesName() {
return coreProperties.getProperty(CORE_PROPERTIES);
}
public String getDataDir() {
return coreProperties.getProperty(CORE_DATADIR);
}
public boolean usingDefaultDataDir() {
return defaultProperties.get(CORE_DATADIR).equals(coreProperties.getProperty(CORE_DATADIR));
}
/**
* @return the core instance directory
*/
public Path getInstanceDir() {
return instanceDir;
}
/**@return the core configuration resource name. */
public String getConfigName() {
return coreProperties.getProperty(CORE_CONFIG);
}
/**@return the core schema resource name. Not actually used if schema is managed mode. */
public String getSchemaName() {
return coreProperties.getProperty(CORE_SCHEMA);
}
/**@return the initial core name */
public String getName() {
return coreProperties.getProperty(CORE_NAME);
}
/** TODO remove mutability */
void setProperty(String prop, String val) {
if (substitutableProperties.containsKey(prop)) {
substitutableProperties.setProperty(prop, val);
return;
}
coreProperties.setProperty(prop, val);
}
public String getCollectionName() {
return cloudDesc == null ? null : cloudDesc.getCollectionName();
}
public CloudDescriptor getCloudDescriptor() {
return cloudDesc;
}
public boolean isLoadOnStartup() {
String tmp = coreProperties.getProperty(CORE_LOADONSTARTUP, "false");
return Boolean.parseBoolean(tmp);
}
public boolean isTransient() {
String tmp = coreProperties.getProperty(CORE_TRANSIENT, "false");
return PropertiesUtil.toBoolean(tmp);
}
public String getUlogDir() {
return coreProperties.getProperty(CORE_ULOGDIR);
}
/**
* Returns a specific property defined on this CoreDescriptor
* @param prop - value to read from the properties structure.
* @param defVal - return if no property found.
* @return associated string. May be null.
*/
public String getCoreProperty(String prop, String defVal) {
return coreProperties.getProperty(prop, defVal);
}
/**
* Returns all substitutable properties defined on this CoreDescriptor
* @return all substitutable properties defined on this CoreDescriptor
*/
public Properties getSubstitutableProperties() {
return substitutableProperties;
}
@Override
public String toString() {
return "CoreDescriptor[name=" + this.getName() + ";instanceDir=" + this.getInstanceDir() + "]";
}
public String getConfigSet() {
//TODO consider falling back on "collection.configName" ( CollectionAdminParams.COLL_CONF )
return coreProperties.getProperty(CORE_CONFIGSET);
}
/** TODO remove mutability or at least make this non-public? */
public void setConfigSet(String configSetName) {
coreProperties.setProperty(CORE_CONFIGSET, configSetName);
}
public String getConfigSetPropertiesName() {
return coreProperties.getProperty(CORE_CONFIGSET_PROPERTIES);
}
public boolean isConfigSetTrusted() {
return trustedConfigSet;
}
/** TODO remove mutability */
public void setConfigSetTrusted(boolean trusted) {
this.trustedConfigSet = trusted;
}
}
| 1 | 34,527 | A bit late, but I don't think this is necessary, as all callers will send absolute paths. And if you ever get a relative path, resolving it with `toAbsolutePath()` leads to it being relative to whatever CWD the app is started with, while the typical resolving of relative `instanceDir` is to resolve it relative to CoreContainer#coreRootDirectory. | apache-lucene-solr | java |
@@ -4188,7 +4188,8 @@ function getModelsMapForPopulate(model, docs, options) {
foreignField = foreignField.call(doc);
}
- const localFieldPath = modelSchema.paths[localField];
+ const localFieldPathType = modelSchema._getPathType(localField);
+ const localFieldPath = localFieldPathType === 'real' ? modelSchema.paths[localField] : localFieldPathType.schema;
const localFieldGetters = localFieldPath ? localFieldPath.getters : [];
let ret;
| 1 | 'use strict';
/*!
* Module dependencies.
*/
const Aggregate = require('./aggregate');
const ChangeStream = require('./cursor/ChangeStream');
const Document = require('./document');
const DocumentNotFoundError = require('./error').DocumentNotFoundError;
const DivergentArrayError = require('./error').DivergentArrayError;
const Error = require('./error');
const EventEmitter = require('events').EventEmitter;
const MongooseMap = require('./types/map');
const OverwriteModelError = require('./error').OverwriteModelError;
const PromiseProvider = require('./promise_provider');
const Query = require('./query');
const Schema = require('./schema');
const VersionError = require('./error').VersionError;
const ParallelSaveError = require('./error').ParallelSaveError;
const applyQueryMiddleware = require('./helpers/query/applyQueryMiddleware');
const applyHooks = require('./helpers/model/applyHooks');
const applyMethods = require('./helpers/model/applyMethods');
const applyStatics = require('./helpers/model/applyStatics');
const applyWriteConcern = require('./helpers/schema/applyWriteConcern');
const assignRawDocsToIdStructure = require('./helpers/populate/assignRawDocsToIdStructure');
const castBulkWrite = require('./helpers/model/castBulkWrite');
const discriminator = require('./helpers/model/discriminator');
const getDiscriminatorByValue = require('./queryhelpers').getDiscriminatorByValue;
const immediate = require('./helpers/immediate');
const internalToObjectOptions = require('./options').internalToObjectOptions;
const isPathExcluded = require('./helpers/projection/isPathExcluded');
const isPathSelectedInclusive = require('./helpers/projection/isPathSelectedInclusive');
const get = require('./helpers/get');
const getSchemaTypes = require('./helpers/populate/getSchemaTypes');
const getVirtual = require('./helpers/populate/getVirtual');
const leanPopulateMap = require('./helpers/populate/leanPopulateMap');
const modifiedPaths = require('./helpers/update/modifiedPaths');
const mpath = require('mpath');
const normalizeRefPath = require('./helpers/populate/normalizeRefPath');
const parallel = require('async/parallel');
const parallelLimit = require('async/parallelLimit');
const setParentPointers = require('./helpers/schema/setParentPointers');
const util = require('util');
const utils = require('./utils');
const VERSION_WHERE = 1;
const VERSION_INC = 2;
const VERSION_ALL = VERSION_WHERE | VERSION_INC;
const modelCollectionSymbol = Symbol.for('mongoose#Model#collection');
const modelSymbol = require('./helpers/symbols').modelSymbol;
const schemaMixedSymbol = require('./schema/symbols').schemaMixedSymbol;
/**
* A Model is a class that's your primary tool for interacting with MongoDB.
* An instance of a Model is called a [Document](./api.html#Document).
*
* In Mongoose, the term "Model" refers to subclasses of the `mongoose.Model`
* class. You should not use the `mongoose.Model` class directly. The
* [`mongoose.model()`](./api.html#mongoose_Mongoose-model) and
* [`connection.model()`](./api.html#connection_Connection-model) functions
* create subclasses of `mongoose.Model` as shown below.
*
* ####Example:
*
* // `UserModel` is a "Model", a subclass of `mongoose.Model`.
* const UserModel = mongoose.model('User', new Schema({ name: String }));
*
* // You can use a Model to create new documents using `new`:
* const userDoc = new UserModel({ name: 'Foo' });
* await userDoc.save();
*
* // You also use a model to create queries:
* const userFromDb = await UserModel.findOne({ name: 'Foo' });
*
* @param {Object} doc values for initial set
 * @param [fields] optional object containing the fields that were selected in the query which returned this document. You do **not** need to set this parameter to ensure Mongoose handles your [query projection](./api.html#query_Query-select).
* @inherits Document http://mongoosejs.com/docs/api.html#document-js
* @event `error`: If listening to this event, 'error' is emitted when a document was saved without passing a callback and an `error` occurred. If not listening, the event bubbles to the connection used to create this Model.
* @event `index`: Emitted after `Model#ensureIndexes` completes. If an error occurred it is passed with the event.
* @event `index-single-start`: Emitted when an individual index starts within `Model#ensureIndexes`. The fields and options being used to build the index are also passed with the event.
* @event `index-single-done`: Emitted when an individual index finishes within `Model#ensureIndexes`. If an error occurred it is passed with the event. The fields, options, and index name are also passed.
* @api public
*/
function Model(doc, fields, skipId) {
if (fields instanceof Schema) {
throw new TypeError('2nd argument to `Model` must be a POJO or string, ' +
'**not** a schema. Make sure you\'re calling `mongoose.model()`, not ' +
'`mongoose.Model()`.');
}
Document.call(this, doc, fields, skipId);
}
/*!
* Inherits from Document.
*
* All Model.prototype features are available on
* top level (non-sub) documents.
*/
Model.prototype.__proto__ = Document.prototype;
Model.prototype.$isMongooseModelPrototype = true;
/**
* Connection the model uses.
*
* @api public
* @property db
* @memberOf Model
* @instance
*/
Model.prototype.db;
/**
* Collection the model uses.
*
* This property is read-only. Modifying this property is a no-op.
*
* @api public
* @property collection
* @memberOf Model
* @instance
*/
Model.prototype.collection;
/**
* The name of the model
*
* @api public
* @property modelName
* @memberOf Model
* @instance
*/
Model.prototype.modelName;
/**
* Additional properties to attach to the query when calling `save()` and
* `isNew` is false.
*
* @api public
* @property $where
* @memberOf Model
* @instance
*/
Model.prototype.$where;
/**
* If this is a discriminator model, `baseModelName` is the name of
* the base model.
*
* @api public
* @property baseModelName
* @memberOf Model
* @instance
*/
Model.prototype.baseModelName;
/**
* Event emitter that reports any errors that occurred. Useful for global error
* handling.
*
* ####Example:
*
* MyModel.events.on('error', err => console.log(err.message));
*
* // Prints a 'CastError' because of the above handler
* await MyModel.findOne({ _id: 'notanid' }).catch(noop);
*
* @api public
* @fires error whenever any query or model function errors
* @memberOf Model
* @static events
*/
Model.events;
/*!
* Compiled middleware for this model. Set in `applyHooks()`.
*
* @api private
* @property _middleware
* @memberOf Model
* @static
*/
Model._middleware;
/*!
* ignore
*/
function _applyCustomWhere(doc, where) {
if (doc.$where == null) {
return;
}
const keys = Object.keys(doc.$where);
const len = keys.length;
for (let i = 0; i < len; ++i) {
where[keys[i]] = doc.$where[keys[i]];
}
}
/*!
* ignore
*/
Model.prototype.$__handleSave = function(options, callback) {
const _this = this;
let saveOptions = {};
if ('safe' in options) {
_handleSafe(options);
}
applyWriteConcern(this.schema, options);
if ('w' in options) {
saveOptions.w = options.w;
}
if ('j' in options) {
saveOptions.j = options.j;
}
if ('wtimeout' in options) {
saveOptions.wtimeout = options.wtimeout;
}
if ('checkKeys' in options) {
saveOptions.checkKeys = options.checkKeys;
}
const session = 'session' in options ? options.session : this.$session();
if (session != null) {
saveOptions.session = session;
this.$session(session);
}
if (Object.keys(saveOptions).length === 0) {
saveOptions = null;
}
if (this.isNew) {
// send entire doc
const obj = this.toObject(internalToObjectOptions);
if ((obj || {})._id === void 0) {
// documents must have an _id else mongoose won't know
// what to update later if more changes are made. the user
// wouldn't know what _id was generated by mongodb either
// nor would the ObjectId generated my mongodb necessarily
// match the schema definition.
setTimeout(function() {
callback(new Error('document must have an _id before saving'));
}, 0);
return;
}
this.$__version(true, obj);
this[modelCollectionSymbol].insertOne(obj, saveOptions, function(err, ret) {
if (err) {
_this.isNew = true;
_this.emit('isNew', true);
_this.constructor.emit('isNew', true);
callback(err, null);
return;
}
callback(null, ret);
});
this.$__reset();
this.isNew = false;
this.emit('isNew', false);
this.constructor.emit('isNew', false);
// Make it possible to retry the insert
this.$__.inserting = true;
} else {
// Make sure we don't treat it as a new object on error,
// since it already exists
this.$__.inserting = false;
const delta = this.$__delta();
if (delta) {
if (delta instanceof Error) {
callback(delta);
return;
}
const where = this.$__where(delta[0]);
if (where instanceof Error) {
callback(where);
return;
}
_applyCustomWhere(this, where);
this[modelCollectionSymbol].updateOne(where, delta[1], saveOptions, function(err, ret) {
if (err) {
callback(err);
return;
}
ret.$where = where;
callback(null, ret);
});
} else {
this.$__reset();
callback();
return;
}
this.emit('isNew', false);
this.constructor.emit('isNew', false);
}
};
/*!
* ignore
*/
Model.prototype.$__save = function(options, callback) {
this.$__handleSave(options, (error, result) => {
if (error) {
return this.schema.s.hooks.execPost('save:error', this, [this], { error: error }, function(error) {
callback(error);
});
}
// store the modified paths before the document is reset
const modifiedPaths = this.modifiedPaths();
this.$__reset();
let numAffected = 0;
if (get(options, 'safe.w') !== 0 && get(options, 'w') !== 0) {
// Skip checking if write succeeded if writeConcern is set to
// unacknowledged writes, because otherwise `numAffected` will always be 0
if (result) {
if (Array.isArray(result)) {
numAffected = result.length;
} else if (result.result && result.result.n !== undefined) {
numAffected = result.result.n;
} else if (result.result && result.result.nModified !== undefined) {
numAffected = result.result.nModified;
} else {
numAffected = result;
}
}
// was this an update that required a version bump?
if (this.$__.version && !this.$__.inserting) {
const doIncrement = VERSION_INC === (VERSION_INC & this.$__.version);
this.$__.version = undefined;
const key = this.schema.options.versionKey;
const version = this.getValue(key) || 0;
if (numAffected <= 0) {
// the update failed. pass an error back
const err = options.$versionError || new VersionError(this, version, modifiedPaths);
return callback(err);
}
// increment version if was successful
if (doIncrement) {
this.setValue(key, version + 1);
}
}
if (result != null && numAffected <= 0) {
error = new DocumentNotFoundError(result.$where);
return this.schema.s.hooks.execPost('save:error', this, [this], { error: error }, function(error) {
callback(error);
});
}
}
this.$__.saving = undefined;
this.emit('save', this, numAffected);
this.constructor.emit('save', this, numAffected);
callback(null, this);
});
};
/*!
* ignore
*/
function generateVersionError(doc, modifiedPaths) {
const key = doc.schema.options.versionKey;
if (!key) {
return null;
}
const version = doc.getValue(key) || 0;
return new VersionError(doc, version, modifiedPaths);
}
/**
* Saves this document.
*
* ####Example:
*
* product.sold = Date.now();
* product.save(function (err, product) {
* if (err) ..
* })
*
* The callback will receive two parameters
*
* 1. `err` if an error occurred
* 2. `product` which is the saved `product`
*
* As an extra measure of flow control, save will return a Promise.
* ####Example:
* product.save().then(function(product) {
* ...
* });
*
* @param {Object} [options] options optional options
 * @param {Object} [options.safe] (DEPRECATED) overrides [schema's safe option](http://mongoosejs.com/docs/guide.html#safe). Use the `w` option instead.
* @param {Boolean} [options.validateBeforeSave] set to false to save without validating.
* @param {Number|String} [options.w] set the [write concern](https://docs.mongodb.com/manual/reference/write-concern/#w-option). Overrides the [schema-level `writeConcern` option](/docs/guide.html#writeConcern)
* @param {Boolean} [options.j] set to true for MongoDB to wait until this `save()` has been [journaled before resolving the returned promise](https://docs.mongodb.com/manual/reference/write-concern/#j-option). Overrides the [schema-level `writeConcern` option](/docs/guide.html#writeConcern)
* @param {Number} [options.wtimeout] sets a [timeout for the write concern](https://docs.mongodb.com/manual/reference/write-concern/#wtimeout). Overrides the [schema-level `writeConcern` option](/docs/guide.html#writeConcern).
* @param {Boolean} [options.checkKeys=true] the MongoDB driver prevents you from saving keys that start with '$' or contain '.' by default. Set this option to `false` to skip that check. See [restrictions on field names](https://docs.mongodb.com/manual/reference/limits/#Restrictions-on-Field-Names)
* @param {Boolean} [options.timestamps=true] if `false` and [timestamps](./guide.html#timestamps) are enabled, skip timestamps for this `save()`.
* @param {Session} [options.session=null] the [session](https://docs.mongodb.com/manual/reference/server-sessions/) associated with this save operation. If not specified, defaults to the [document's associated session](api.html#document_Document-$session).
* @param {Function} [fn] optional callback
* @return {Promise|undefined} Returns undefined if used with callback or a Promise otherwise.
* @api public
* @see middleware http://mongoosejs.com/docs/middleware.html
*/
Model.prototype.save = function(options, fn) {
let parallelSave;
if (this.$__.saving) {
parallelSave = new ParallelSaveError(this);
} else {
this.$__.saving = new ParallelSaveError(this);
}
if (typeof options === 'function') {
fn = options;
options = undefined;
}
if (options != null) {
options = utils.clone(options);
} else {
options = {};
}
if (fn) {
fn = this.constructor.$wrapCallback(fn);
}
options.$versionError = generateVersionError(this, this.modifiedPaths());
return utils.promiseOrCallback(fn, cb => {
if (parallelSave) {
this.$__handleReject(parallelSave);
return cb(parallelSave);
}
this.$__.saveOptions = options;
this.$__save(options, error => {
this.$__.saving = undefined;
delete this.$__.saveOptions;
if (error) {
this.$__handleReject(error);
return cb(error);
}
cb(null, this);
});
}, this.constructor.events);
};
/*!
* Determines whether versioning should be skipped for the given path
*
* @param {Document} self
* @param {String} path
* @return {Boolean} true if versioning should be skipped for the given path
*/
function shouldSkipVersioning(self, path) {
const skipVersioning = self.schema.options.skipVersioning;
if (!skipVersioning) return false;
// Remove any array indexes from the path
path = path.replace(/\.\d+\./, '.');
return skipVersioning[path];
}
/*!
* Apply the operation to the delta (update) clause as
* well as track versioning for our where clause.
*
* @param {Document} self
* @param {Object} where
* @param {Object} delta
* @param {Object} data
* @param {Mixed} val
* @param {String} [operation]
*/
function operand(self, where, delta, data, val, op) {
// delta
op || (op = '$set');
if (!delta[op]) delta[op] = {};
delta[op][data.path] = val;
// disabled versioning?
if (self.schema.options.versionKey === false) return;
// path excluded from versioning?
if (shouldSkipVersioning(self, data.path)) return;
// already marked for versioning?
if (VERSION_ALL === (VERSION_ALL & self.$__.version)) return;
switch (op) {
case '$set':
case '$unset':
case '$pop':
case '$pull':
case '$pullAll':
case '$push':
case '$addToSet':
break;
default:
// nothing to do
return;
}
// ensure updates sent with positional notation are
// editing the correct array element.
// only increment the version if an array position changes.
// modifying elements of an array is ok if position does not change.
if (op === '$push' || op === '$addToSet' || op === '$pullAll' || op === '$pull') {
self.$__.version = VERSION_INC;
} else if (/^\$p/.test(op)) {
// potentially changing array positions
self.increment();
} else if (Array.isArray(val)) {
// $set an array
self.increment();
} else if (/\.\d+\.|\.\d+$/.test(data.path)) {
// now handling $set, $unset
// subpath of array
self.$__.version = VERSION_WHERE;
}
}
/*!
* Compiles an update and where clause for a `val` with _atomics.
*
* @param {Document} self
* @param {Object} where
* @param {Object} delta
* @param {Object} data
* @param {Array} value
*/
function handleAtomics(self, where, delta, data, value) {
if (delta.$set && delta.$set[data.path]) {
// $set has precedence over other atomics
return;
}
if (typeof value.$__getAtomics === 'function') {
value.$__getAtomics().forEach(function(atomic) {
const op = atomic[0];
const val = atomic[1];
operand(self, where, delta, data, val, op);
});
return;
}
// legacy support for plugins
const atomics = value._atomics;
const ops = Object.keys(atomics);
let i = ops.length;
let val;
let op;
if (i === 0) {
// $set
if (utils.isMongooseObject(value)) {
value = value.toObject({depopulate: 1, _isNested: true});
} else if (value.valueOf) {
value = value.valueOf();
}
return operand(self, where, delta, data, value);
}
function iter(mem) {
return utils.isMongooseObject(mem)
? mem.toObject({depopulate: 1, _isNested: true})
: mem;
}
while (i--) {
op = ops[i];
val = atomics[op];
if (utils.isMongooseObject(val)) {
val = val.toObject({depopulate: true, transform: false, _isNested: true});
} else if (Array.isArray(val)) {
val = val.map(iter);
} else if (val.valueOf) {
val = val.valueOf();
}
if (op === '$addToSet') {
val = {$each: val};
}
operand(self, where, delta, data, val, op);
}
}
/**
* Produces a special query document of the modified properties used in updates.
*
* @api private
* @method $__delta
* @memberOf Model
* @instance
*/
Model.prototype.$__delta = function() {
const dirty = this.$__dirty();
if (!dirty.length && VERSION_ALL !== this.$__.version) {
return;
}
const where = {};
const delta = {};
const len = dirty.length;
const divergent = [];
let d = 0;
where._id = this._doc._id;
// If `_id` is an object, need to depopulate, but also need to be careful
// because `_id` can technically be null (see gh-6406)
if (get(where, '_id.$__', null) != null) {
where._id = where._id.toObject({ transform: false, depopulate: true });
}
for (; d < len; ++d) {
const data = dirty[d];
let value = data.value;
const match = checkDivergentArray(this, data.path, value);
if (match) {
divergent.push(match);
continue;
}
const pop = this.populated(data.path, true);
if (!pop && this.$__.selected) {
// If any array was selected using an $elemMatch projection, we alter the path and where clause
// NOTE: MongoDB only supports projected $elemMatch on top level array.
const pathSplit = data.path.split('.');
const top = pathSplit[0];
if (this.$__.selected[top] && this.$__.selected[top].$elemMatch) {
// If the selected array entry was modified
if (pathSplit.length > 1 && pathSplit[1] == 0 && typeof where[top] === 'undefined') {
where[top] = this.$__.selected[top];
pathSplit[1] = '$';
data.path = pathSplit.join('.');
}
// if the selected array was modified in any other way throw an error
else {
divergent.push(data.path);
continue;
}
}
}
if (divergent.length) continue;
if (value === undefined) {
operand(this, where, delta, data, 1, '$unset');
} else if (value === null) {
operand(this, where, delta, data, null);
} else if (value._path && value._atomics) {
// arrays and other custom types (support plugins etc)
handleAtomics(this, where, delta, data, value);
} else if (value._path && Buffer.isBuffer(value)) {
// MongooseBuffer
value = value.toObject();
operand(this, where, delta, data, value);
} else {
value = utils.clone(value, {
depopulate: true,
transform: false,
virtuals: false,
_isNested: true
});
operand(this, where, delta, data, value);
}
}
if (divergent.length) {
return new DivergentArrayError(divergent);
}
if (this.$__.version) {
this.$__version(where, delta);
}
return [where, delta];
};
/*!
* Determine if array was populated with some form of filter and is now
* being updated in a manner which could overwrite data unintentionally.
*
* @see https://github.com/Automattic/mongoose/issues/1334
* @param {Document} doc
* @param {String} path
* @return {String|undefined}
*/
function checkDivergentArray(doc, path, array) {
// see if we populated this path
const pop = doc.populated(path, true);
if (!pop && doc.$__.selected) {
// If any array was selected using an $elemMatch projection, we deny the update.
// NOTE: MongoDB only supports projected $elemMatch on top level array.
const top = path.split('.')[0];
if (doc.$__.selected[top + '.$']) {
return top;
}
}
if (!(pop && array && array.isMongooseArray)) return;
// If the array was populated using options that prevented all
// documents from being returned (match, skip, limit) or they
// deselected the _id field, $pop and $set of the array are
// not safe operations. If _id was deselected, we do not know
// how to remove elements. $pop will pop off the _id from the end
// of the array in the db which is not guaranteed to be the
// same as the last element we have here. $set of the entire array
  // would be similarly destructive as we never received all
// elements of the array and potentially would overwrite data.
const check = pop.options.match ||
pop.options.options && utils.object.hasOwnProperty(pop.options.options, 'limit') || // 0 is not permitted
pop.options.options && pop.options.options.skip || // 0 is permitted
pop.options.select && // deselected _id?
(pop.options.select._id === 0 ||
/\s?-_id\s?/.test(pop.options.select));
if (check) {
const atomics = array._atomics;
if (Object.keys(atomics).length === 0 || atomics.$set || atomics.$pop) {
return path;
}
}
}
/**
* Appends versioning to the where and update clauses.
*
* @api private
* @method $__version
* @memberOf Model
* @instance
*/
Model.prototype.$__version = function(where, delta) {
const key = this.schema.options.versionKey;
if (where === true) {
// this is an insert
if (key) this.setValue(key, delta[key] = 0);
return;
}
// updates
// only apply versioning if our versionKey was selected. else
// there is no way to select the correct version. we could fail
// fast here and force them to include the versionKey but
  // that's a bit intrusive. can we do this automatically?
if (!this.isSelected(key)) {
return;
}
// $push $addToSet don't need the where clause set
if (VERSION_WHERE === (VERSION_WHERE & this.$__.version)) {
const value = this.getValue(key);
if (value != null) where[key] = value;
}
if (VERSION_INC === (VERSION_INC & this.$__.version)) {
if (get(delta.$set, key, null) != null) {
// Version key is getting set, means we'll increment the doc's version
// after a successful save, so we should set the incremented version so
// future saves don't fail (gh-5779)
++delta.$set[key];
} else {
delta.$inc = delta.$inc || {};
delta.$inc[key] = 1;
}
}
};
/**
* Signal that we desire an increment of this documents version.
*
* ####Example:
*
* Model.findById(id, function (err, doc) {
* doc.increment();
* doc.save(function (err) { .. })
* })
*
* @see versionKeys http://mongoosejs.com/docs/guide.html#versionKey
* @api public
*/
Model.prototype.increment = function increment() {
this.$__.version = VERSION_ALL;
return this;
};
/**
* Returns a query object
*
* @api private
* @method $__where
* @memberOf Model
* @instance
*/
Model.prototype.$__where = function _where(where) {
where || (where = {});
if (!where._id) {
where._id = this._doc._id;
}
if (this._doc._id === void 0) {
return new Error('No _id found on document!');
}
return where;
};
/**
* Removes this document from the db.
*
* ####Example:
* product.remove(function (err, product) {
* if (err) return handleError(err);
* Product.findById(product._id, function (err, product) {
* console.log(product) // null
* })
* })
*
*
 * As an extra measure of flow control, remove will return a Promise (bound to `fn` if passed) so it could be chained, or hooked to receive errors
*
* ####Example:
* product.remove().then(function (product) {
* ...
* }).catch(function (err) {
* assert.ok(err)
* })
*
* @param {function(err,product)} [fn] optional callback
* @return {Promise} Promise
* @api public
*/
Model.prototype.remove = function remove(options, fn) {
if (typeof options === 'function') {
fn = options;
options = undefined;
}
if (!options) {
options = {};
}
if (fn) {
fn = this.constructor.$wrapCallback(fn);
}
return utils.promiseOrCallback(fn, cb => {
this.$__remove(options, cb);
}, this.constructor.events);
};
/**
* Alias for remove
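 *
 * The sketch below assumes a `product` document instance; `delete()` behaves
 * exactly like `remove()`.
 *
 * ####Example:
 *
 *     product.delete(function(err, product) {
 *       if (err) return handleError(err);
 *     });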
*/
Model.prototype.delete = Model.prototype.remove;
/*!
* ignore
*/
Model.prototype.$__remove = function $__remove(options, cb) {
if (this.$__.isDeleted) {
return immediate(() => cb(null, this));
}
const where = this.$__where();
if (where instanceof Error) {
return cb(where);
}
_applyCustomWhere(this, where);
if (this.$session() != null) {
options = options || {};
if (!('session' in options)) {
options.session = this.$session();
}
}
this[modelCollectionSymbol].deleteOne(where, options, err => {
if (!err) {
this.$__.isDeleted = true;
this.emit('remove', this);
this.constructor.emit('remove', this);
return cb(null, this);
}
this.$__.isDeleted = false;
cb(err);
});
};
/**
* Returns another Model instance.
*
* ####Example:
*
* var doc = new Tank;
* doc.model('User').findById(id, callback);
*
* @param {String} name model name
* @api public
*/
Model.prototype.model = function model(name) {
return this.db.model(name);
};
/**
* Adds a discriminator type.
*
* ####Example:
*
* function BaseSchema() {
* Schema.apply(this, arguments);
*
* this.add({
* name: String,
* createdAt: Date
* });
* }
* util.inherits(BaseSchema, Schema);
*
* var PersonSchema = new BaseSchema();
* var BossSchema = new BaseSchema({ department: String });
*
* var Person = mongoose.model('Person', PersonSchema);
* var Boss = Person.discriminator('Boss', BossSchema);
* new Boss().__t; // "Boss". `__t` is the default `discriminatorKey`
*
* var employeeSchema = new Schema({ boss: ObjectId });
* var Employee = Person.discriminator('Employee', employeeSchema, 'staff');
* new Employee().__t; // "staff" because of 3rd argument above
*
* @param {String} name discriminator model name
* @param {Schema} schema discriminator model schema
* @param {String} value the string stored in the `discriminatorKey` property
* @api public
*/
Model.discriminator = function(name, schema, value) {
let model;
if (typeof name === 'function') {
model = name;
name = utils.getFunctionName(model);
if (!(model.prototype instanceof Model)) {
throw new Error('The provided class ' + name + ' must extend Model');
}
}
schema = discriminator(this, name, schema, value, true);
if (this.db.models[name]) {
throw new OverwriteModelError(name);
}
schema.$isRootDiscriminator = true;
schema.$globalPluginsApplied = true;
model = this.db.model(model || name, schema, this.collection.name);
this.discriminators[name] = model;
const d = this.discriminators[name];
d.prototype.__proto__ = this.prototype;
Object.defineProperty(d, 'baseModelName', {
value: this.modelName,
configurable: true,
writable: false
});
// apply methods and statics
applyMethods(d, schema);
applyStatics(d, schema);
return d;
};
// Model (class) features
/*!
* Give the constructor the ability to emit events.
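 *
 * For example (a sketch): `MyModel.on('index', handler)` and
 * `MyModel.emit('error', err)` become available on every compiled model.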
*/
for (const i in EventEmitter.prototype) {
Model[i] = EventEmitter.prototype[i];
}
/**
* This function is responsible for building [indexes](https://docs.mongodb.com/manual/indexes/),
* unless [`autoIndex`](http://mongoosejs.com/docs/guide.html#autoIndex) is turned off.
*
* Mongoose calls this function automatically when a model is created using
* [`mongoose.model()`](/docs/api.html#mongoose_Mongoose-model) or
 * [`connection.model()`](/docs/api.html#connection_Connection-model), so you
 * don't need to call it. This function is also idempotent, so you may call it
 * to get back a promise that will resolve when your indexes are finished
 * building as an alternative to [`MyModel.on('index')`](/docs/guide.html#indexes).
*
* ####Example:
*
* var eventSchema = new Schema({ thing: { type: 'string', unique: true }})
* // This calls `Event.init()` implicitly, so you don't need to call
* // `Event.init()` on your own.
* var Event = mongoose.model('Event', eventSchema);
*
* Event.init().then(function(Event) {
* // You can also use `Event.on('index')` if you prefer event emitters
* // over promises.
* console.log('Indexes are done building!');
* });
*
* @api public
* @param {Function} [callback]
* @returns {Promise}
*/
Model.init = function init(callback) {
this.schema.emit('init', this);
if (this.$init != null) {
if (callback) {
this.$init.then(() => callback(), err => callback(err));
return null;
}
return this.$init;
}
// If `dropDatabase()` is called, this model's collection will not be
// init-ed. It is sufficiently common to call `dropDatabase()` after
// `mongoose.connect()` but before creating models that we want to
// support this. See gh-6967
this.db.$internalEmitter.once('dropDatabase', () => {
delete this.$init;
});
const Promise = PromiseProvider.get();
const autoIndex = this.schema.options.autoIndex == null ?
this.db.config.autoIndex :
this.schema.options.autoIndex;
const autoCreate = this.schema.options.autoCreate == null ?
this.db.config.autoCreate :
this.schema.options.autoCreate;
const _ensureIndexes = autoIndex ?
cb => this.ensureIndexes({ _automatic: true }, cb) :
cb => cb();
const _createCollection = autoCreate ?
cb => this.createCollection({}, cb) :
cb => cb();
this.$init = new Promise((resolve, reject) => {
_createCollection(error => {
if (error) {
return reject(error);
}
_ensureIndexes(error => {
if (error) {
return reject(error);
}
resolve(this);
});
});
});
if (callback) {
this.$init.then(() => callback(), err => callback(err));
this.$caught = true;
return null;
} else {
const _catch = this.$init.catch;
const _this = this;
this.$init.catch = function() {
this.$caught = true;
return _catch.apply(_this.$init, arguments);
};
}
return this.$init;
};
/**
* Create the collection for this model. By default, if no indexes are specified,
* mongoose will not create the collection for the model until any documents are
* created. Use this method to create the collection explicitly.
*
* Note 1: You may need to call this before starting a transaction
* See https://docs.mongodb.com/manual/core/transactions/#transactions-and-operations
*
 * Note 2: You don't have to call this if your schema contains an index or a unique field.
* In that case, just use `Model.init()`
*
* ####Example:
*
* var userSchema = new Schema({ name: String })
* var User = mongoose.model('User', userSchema);
*
* User.createCollection().then(function(collection) {
* console.log('Collection is created!');
* });
*
* @api public
* @param {Object} [options] see [MongoDB driver docs](http://mongodb.github.io/node-mongodb-native/3.1/api/Db.html#createCollection)
* @param {Function} [callback]
* @returns {Promise}
*/
Model.createCollection = function createCollection(options, callback) {
if (typeof options === 'string') {
    throw new Error('You can\'t specify a new collection name in Model.createCollection.' +
      ' This is not like Connection.createCollection. Only options are accepted here.');
} else if (typeof options === 'function') {
callback = options;
options = null;
}
if (callback) {
callback = this.$wrapCallback(callback);
}
const schemaCollation = get(this, 'schema.options.collation', null);
if (schemaCollation != null) {
options = Object.assign({ collation: schemaCollation }, options);
}
return utils.promiseOrCallback(callback, cb => {
this.db.createCollection(this.collection.collectionName, options, utils.tick((error) => {
if (error) {
return cb(error);
}
this.collection = this.db.collection(this.collection.collectionName, options);
cb(null, this.collection);
}));
}, this.events);
};
/**
* Makes the indexes in MongoDB match the indexes defined in this model's
* schema. This function will drop any indexes that are not defined in
* the model's schema except the `_id` index, and build any indexes that
* are in your schema but not in MongoDB.
*
* See the [introductory blog post](http://thecodebarbarian.com/whats-new-in-mongoose-5-2-syncindexes)
* for more information.
*
* ####Example:
*
* const schema = new Schema({ name: { type: String, unique: true } });
* const Customer = mongoose.model('Customer', schema);
 *     await Customer.collection.createIndex({ age: 1 }); // Index is not in schema
* // Will drop the 'age' index and create an index on `name`
* await Customer.syncIndexes();
*
* @param {Object} [options] options to pass to `ensureIndexes()`
* @param {Function} [callback] optional callback
* @return {Promise|undefined} Returns `undefined` if callback is specified, returns a promise if no callback.
* @api public
*/
Model.syncIndexes = function syncIndexes(options, callback) {
callback = this.$wrapCallback(callback);
const dropNonSchemaIndexes = (cb) => {
this.listIndexes((err, indexes) => {
if (err != null) {
return cb(err);
}
const schemaIndexes = this.schema.indexes();
const toDrop = [];
for (const index of indexes) {
let found = false;
// Never try to drop `_id` index, MongoDB server doesn't allow it
if (index.key._id) {
continue;
}
for (const schemaIndex of schemaIndexes) {
const key = schemaIndex[0];
const options = _decorateDiscriminatorIndexOptions(this,
utils.clone(schemaIndex[1]));
// If these options are different, need to rebuild the index
const optionKeys = ['unique', 'partialFilterExpression', 'sparse', 'expireAfterSeconds'];
const indexCopy = Object.assign({}, index);
for (const key of optionKeys) {
if (!(key in options) && !(key in indexCopy)) {
continue;
}
indexCopy[key] = options[key];
}
if (utils.deepEqual(key, index.key) &&
utils.deepEqual(index, indexCopy)) {
found = true;
break;
}
}
if (!found) {
toDrop.push(index.name);
}
}
if (toDrop.length === 0) {
return cb(null, []);
}
dropIndexes(toDrop, cb);
});
};
const dropIndexes = (toDrop, cb) => {
let remaining = toDrop.length;
let error = false;
toDrop.forEach(indexName => {
this.collection.dropIndex(indexName, err => {
if (err != null) {
error = true;
return cb(err);
}
if (!error) {
--remaining || cb(null, toDrop);
}
});
});
};
return utils.promiseOrCallback(callback, cb => {
dropNonSchemaIndexes((err, dropped) => {
if (err != null) {
return cb(err);
}
this.createIndexes(options, err => {
if (err != null) {
return cb(err);
}
cb(null, dropped);
});
});
}, this.events);
};
/**
* Lists the indexes currently defined in MongoDB. This may or may not be
* the same as the indexes defined in your schema depending on whether you
* use the [`autoIndex` option](/docs/guide.html#autoIndex) and if you
* build indexes manually.
*
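 * The sketch below shows typical usage; the `Customer` model and `handleError`
 * helper are assumed for illustration.
 *
 * ####Example:
 *
 *     Customer.listIndexes(function(err, indexes) {
 *       if (err) return handleError(err);
 *       // `indexes` is the raw index spec array reported by MongoDB
 *       console.log(indexes.map(function(index) { return index.name; }));
 *     });
 *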
* @param {Function} [cb] optional callback
* @return {Promise|undefined} Returns `undefined` if callback is specified, returns a promise if no callback.
* @api public
*/
Model.listIndexes = function listIndexes(callback) {
callback = this.$wrapCallback(callback);
const _listIndexes = cb => {
this.collection.listIndexes().toArray(cb);
};
return utils.promiseOrCallback(callback, cb => {
// Buffering
if (this.collection.buffer) {
this.collection.addQueue(_listIndexes, [cb]);
} else {
_listIndexes(cb);
}
}, this.events);
};
/**
* Sends `createIndex` commands to mongo for each index declared in the schema.
* The `createIndex` commands are sent in series.
*
* ####Example:
*
* Event.ensureIndexes(function (err) {
* if (err) return handleError(err);
* });
*
* After completion, an `index` event is emitted on this `Model` passing an error if one occurred.
*
* ####Example:
*
* var eventSchema = new Schema({ thing: { type: 'string', unique: true }})
* var Event = mongoose.model('Event', eventSchema);
*
* Event.on('index', function (err) {
* if (err) console.error(err); // error occurred during index creation
* })
*
* _NOTE: It is not recommended that you run this in production. Index creation may impact database performance depending on your load. Use with caution._
*
* @param {Object} [options] internal options
* @param {Function} [cb] optional callback
* @return {Promise}
* @api public
*/
Model.ensureIndexes = function ensureIndexes(options, callback) {
if (typeof options === 'function') {
callback = options;
options = null;
}
if (callback) {
callback = this.$wrapCallback(callback);
}
return utils.promiseOrCallback(callback, cb => {
_ensureIndexes(this, options || {}, error => {
if (error) {
return cb(error);
}
cb(null);
});
}, this.events);
};
/**
 * Similar to `ensureIndexes()`, except it uses the [`createIndex`](http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#createIndex)
* function.
*
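 * The sketch below shows typical usage; the `Event` model and `handleError`
 * helper are assumed for illustration.
 *
 * ####Example:
 *
 *     Event.createIndexes(function(err) {
 *       if (err) return handleError(err);
 *       // All indexes declared in the schema were built using `createIndex`
 *     });
 *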
* @param {Object} [options] internal options
* @param {Function} [cb] optional callback
* @return {Promise}
* @api public
*/
Model.createIndexes = function createIndexes(options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options = options || {};
options.createIndex = true;
return this.ensureIndexes(options, callback);
};
/*!
* ignore
*/
function _ensureIndexes(model, options, callback) {
const indexes = model.schema.indexes();
options = options || {};
const done = function(err) {
if (err && !model.$caught) {
model.emit('error', err);
}
model.emit('index', err);
callback && callback(err);
};
for (const index of indexes) {
const keys = Object.keys(index[0]);
if (keys.length === 1 && keys[0] === '_id' && index[0]._id !== 'hashed') {
console.warn('mongoose: Cannot specify a custom index on `_id` for ' +
'model name "' + model.modelName + '", ' +
'MongoDB does not allow overwriting the default `_id` index. See ' +
'http://bit.ly/mongodb-id-index');
}
}
if (!indexes.length) {
immediate(function() {
done();
});
return;
}
// Indexes are created one-by-one to support how MongoDB < 2.4 deals
// with background indexes.
const indexSingleDone = function(err, fields, options, name) {
model.emit('index-single-done', err, fields, options, name);
};
const indexSingleStart = function(fields, options) {
model.emit('index-single-start', fields, options);
};
const create = function() {
if (options._automatic) {
if (model.schema.options.autoIndex === false ||
(model.schema.options.autoIndex == null && model.db.config.autoIndex === false)) {
return done();
}
}
const index = indexes.shift();
if (!index) {
return done();
}
const indexFields = utils.clone(index[0]);
const indexOptions = utils.clone(index[1]);
_decorateDiscriminatorIndexOptions(model, indexOptions);
if ('safe' in options) {
_handleSafe(options);
}
applyWriteConcern(model.schema, indexOptions);
indexSingleStart(indexFields, options);
let useCreateIndex = !!model.base.options.useCreateIndex;
if ('useCreateIndex' in model.db.config) {
useCreateIndex = !!model.db.config.useCreateIndex;
}
if ('createIndex' in options) {
useCreateIndex = !!options.createIndex;
}
const methodName = useCreateIndex ? 'createIndex' : 'ensureIndex';
model.collection[methodName](indexFields, indexOptions, utils.tick(function(err, name) {
indexSingleDone(err, indexFields, indexOptions, name);
if (err) {
return done(err);
}
create();
}));
};
immediate(function() {
// If buffering is off, do this manually.
if (options._automatic && !model.collection.collection) {
model.collection.addQueue(create, []);
} else {
create();
}
});
}
function _decorateDiscriminatorIndexOptions(model, indexOptions) {
// If the model is a discriminator and it has a unique index, add a
// partialFilterExpression by default so the unique index will only apply
// to that discriminator.
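  // For example (a sketch, assuming the default `__t` discriminator key):
  // `{ unique: true }` declared on a `Boss` discriminator becomes
  // `{ unique: true, partialFilterExpression: { __t: 'Boss' } }`.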
if (model.baseModelName != null && indexOptions.unique &&
!('partialFilterExpression' in indexOptions) &&
!('sparse' in indexOptions)) {
indexOptions.partialFilterExpression = {
[model.schema.options.discriminatorKey]: model.modelName
};
}
return indexOptions;
}
const safeDeprecationWarning = 'Mongoose: the `safe` option for `save()` is ' +
'deprecated. Use the `w` option instead: http://bit.ly/mongoose-save';
const _handleSafe = util.deprecate(function _handleSafe(options) {
if (options.safe) {
if (typeof options.safe === 'boolean') {
options.w = options.safe;
delete options.safe;
}
if (typeof options.safe === 'object') {
options.w = options.safe.w;
options.j = options.safe.j;
options.wtimeout = options.safe.wtimeout;
delete options.safe;
}
}
}, safeDeprecationWarning);
/**
* Schema the model uses.
*
* @property schema
* @receiver Model
* @api public
* @memberOf Model
*/
Model.schema;
/*!
* Connection instance the model uses.
*
* @property db
* @api public
* @memberOf Model
*/
Model.db;
/*!
* Collection the model uses.
*
* @property collection
* @api public
* @memberOf Model
*/
Model.collection;
/**
* Base Mongoose instance the model uses.
*
* @property base
* @api public
* @memberOf Model
*/
Model.base;
/**
* Registered discriminators for this model.
*
* @property discriminators
* @api public
* @memberOf Model
*/
Model.discriminators;
/**
* Translate any aliases fields/conditions so the final query or document object is pure
*
* ####Example:
*
* Character
* .find(Character.translateAliases({
* '名': 'Eddard Stark' // Alias for 'name'
 *         }))
* .exec(function(err, characters) {})
*
* ####Note:
 * Only translate arguments of object type; anything else is returned raw.
*
* @param {Object} raw fields/conditions that may contain aliased keys
* @return {Object} the translated 'pure' fields/conditions
*/
Model.translateAliases = function translateAliases(fields) {
if (typeof fields === 'object') {
// Fields is an object (query conditions or document fields)
for (const key in fields) {
let alias;
const translated = [];
const fieldKeys = key.split('.');
let currentSchema = this.schema;
for (const field in fieldKeys) {
const name = fieldKeys[field];
if (currentSchema && currentSchema.aliases[name]) {
alias = currentSchema.aliases[name];
// Alias found,
translated.push(alias);
} else {
// Alias not found, so treat as un-aliased key
translated.push(name);
}
// Check if aliased path is a schema
if (currentSchema.paths[alias])
currentSchema = currentSchema.paths[alias].schema;
else
currentSchema = null;
}
const translatedKey = translated.join('.');
fields[translatedKey] = fields[key];
if (translatedKey !== key)
delete fields[key]; // We'll be using the translated key instead
}
return fields;
} else {
// Don't know typeof fields
return fields;
}
};
/**
* Removes all documents that match `conditions` from the collection.
* To remove just the first document that matches `conditions`, set the `single`
* option to true.
*
* ####Example:
*
* Character.remove({ name: 'Eddard Stark' }, function (err) {});
*
* ####Note:
*
* This method sends a remove command directly to MongoDB, no Mongoose documents
* are involved. Because no Mongoose documents are involved, _no middleware
* (hooks) are executed_.
*
* @param {Object} conditions
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.remove = function remove(conditions, callback) {
if (typeof conditions === 'function') {
callback = conditions;
conditions = {};
}
// get the mongodb collection object
const mq = new this.Query({}, {}, this, this.collection);
callback = this.$wrapCallback(callback);
return mq.remove(conditions, callback);
};
/**
* Deletes the first document that matches `conditions` from the collection.
* Behaves like `remove()`, but deletes at most one document regardless of the
* `single` option.
*
* ####Example:
*
* Character.deleteOne({ name: 'Eddard Stark' }, function (err) {});
*
* ####Note:
*
* Like `Model.remove()`, this function does **not** trigger `pre('remove')` or `post('remove')` hooks.
*
* @param {Object} conditions
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.deleteOne = function deleteOne(conditions, callback) {
if (typeof conditions === 'function') {
callback = conditions;
conditions = {};
}
// get the mongodb collection object
const mq = new this.Query(conditions, {}, this, this.collection);
callback = this.$wrapCallback(callback);
return mq.deleteOne(callback);
};
/**
* Deletes all of the documents that match `conditions` from the collection.
* Behaves like `remove()`, but deletes all documents that match `conditions`
* regardless of the `single` option.
*
* ####Example:
*
* Character.deleteMany({ name: /Stark/, age: { $gte: 18 } }, function (err) {});
*
* ####Note:
*
* Like `Model.remove()`, this function does **not** trigger `pre('remove')` or `post('remove')` hooks.
*
* @param {Object} conditions
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.deleteMany = function deleteMany(conditions, options, callback) {
if (typeof conditions === 'function') {
callback = conditions;
conditions = {};
options = null;
}
else if (typeof options === 'function') {
callback = options;
options = null;
}
// get the mongodb collection object
const mq = new this.Query(conditions, {}, this, this.collection);
mq.setOptions(options);
if (callback) {
callback = this.$wrapCallback(callback);
}
return mq.deleteMany(callback);
};
/**
* Finds documents
*
* The `conditions` are cast to their respective SchemaTypes before the command is sent.
*
* ####Examples:
*
* // named john and at least 18
* MyModel.find({ name: 'john', age: { $gte: 18 }});
*
* // executes, passing results to callback
* MyModel.find({ name: 'john', age: { $gte: 18 }}, function (err, docs) {});
*
* // executes, name LIKE john and only selecting the "name" and "friends" fields
* MyModel.find({ name: /john/i }, 'name friends', function (err, docs) { })
*
* // passing options
* MyModel.find({ name: /john/i }, null, { skip: 10 })
*
* // passing options and executes
* MyModel.find({ name: /john/i }, null, { skip: 10 }, function (err, docs) {});
*
* // executing a query explicitly
* var query = MyModel.find({ name: /john/i }, null, { skip: 10 })
* query.exec(function (err, docs) {});
*
* // using the promise returned from executing a query
* var query = MyModel.find({ name: /john/i }, null, { skip: 10 });
* var promise = query.exec();
* promise.addBack(function (err, docs) {});
*
* @param {Object} conditions
* @param {Object|String} [projection] optional fields to return, see [`Query.prototype.select()`](#query_Query-select)
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @see field selection #query_Query-select
* @see promise #promise-js
* @api public
*/
Model.find = function find(conditions, projection, options, callback) {
if (typeof conditions === 'function') {
callback = conditions;
conditions = {};
projection = null;
options = null;
} else if (typeof projection === 'function') {
callback = projection;
projection = null;
options = null;
} else if (typeof options === 'function') {
callback = options;
options = null;
}
const mq = new this.Query({}, {}, this, this.collection);
mq.select(projection);
mq.setOptions(options);
if (this.schema.discriminatorMapping &&
this.schema.discriminatorMapping.isRoot &&
mq.selectedInclusively()) {
// Need to select discriminator key because original schema doesn't have it
mq.select(this.schema.options.discriminatorKey);
}
if (callback) {
callback = this.$wrapCallback(callback);
}
return mq.find(conditions, callback);
};
/**
* Finds a single document by its _id field. `findById(id)` is almost*
* equivalent to `findOne({ _id: id })`. If you want to query by a document's
* `_id`, use `findById()` instead of `findOne()`.
*
* The `id` is cast based on the Schema before sending the command.
*
* This function triggers the following middleware.
*
* - `findOne()`
*
* \* Except for how it treats `undefined`. If you use `findOne()`, you'll see
* that `findOne(undefined)` and `findOne({ _id: undefined })` are equivalent
* to `findOne({})` and return arbitrary documents. However, mongoose
* translates `findById(undefined)` into `findOne({ _id: null })`.
*
* ####Example:
*
* // find adventure by id and execute
* Adventure.findById(id, function (err, adventure) {});
*
* // same as above
* Adventure.findById(id).exec(callback);
*
* // select only the adventures name and length
* Adventure.findById(id, 'name length', function (err, adventure) {});
*
* // same as above
* Adventure.findById(id, 'name length').exec(callback);
*
* // include all properties except for `length`
* Adventure.findById(id, '-length').exec(function (err, adventure) {});
*
* // passing options (in this case return the raw js objects, not mongoose documents by passing `lean`
* Adventure.findById(id, 'name', { lean: true }, function (err, doc) {});
*
* // same as above
* Adventure.findById(id, 'name').lean().exec(function (err, doc) {});
*
* @param {Object|String|Number} id value of `_id` to query by
* @param {Object|String} [projection] optional fields to return, see [`Query.prototype.select()`](#query_Query-select)
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @see field selection #query_Query-select
* @see lean queries #query_Query-lean
* @api public
*/
Model.findById = function findById(id, projection, options, callback) {
if (typeof id === 'undefined') {
id = null;
}
if (callback) {
callback = this.$wrapCallback(callback);
}
return this.findOne({_id: id}, projection, options, callback);
};
/**
* Finds one document.
*
* The `conditions` are cast to their respective SchemaTypes before the command is sent.
*
* *Note:* `conditions` is optional, and if `conditions` is null or undefined,
* mongoose will send an empty `findOne` command to MongoDB, which will return
* an arbitrary document. If you're querying by `_id`, use `findById()` instead.
*
* ####Example:
*
 *     // find one adventure whose `type` is 'iphone'
* Adventure.findOne({ type: 'iphone' }, function (err, adventure) {});
*
* // same as above
* Adventure.findOne({ type: 'iphone' }).exec(function (err, adventure) {});
*
* // select only the adventures name
* Adventure.findOne({ type: 'iphone' }, 'name', function (err, adventure) {});
*
* // same as above
* Adventure.findOne({ type: 'iphone' }, 'name').exec(function (err, adventure) {});
*
* // specify options, in this case lean
* Adventure.findOne({ type: 'iphone' }, 'name', { lean: true }, callback);
*
* // same as above
* Adventure.findOne({ type: 'iphone' }, 'name', { lean: true }).exec(callback);
*
* // chaining findOne queries (same as above)
* Adventure.findOne({ type: 'iphone' }).select('name').lean().exec(callback);
*
* @param {Object} [conditions]
* @param {Object|String} [projection] optional fields to return, see [`Query.prototype.select()`](#query_Query-select)
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @see field selection #query_Query-select
* @see lean queries #query_Query-lean
* @api public
*/
Model.findOne = function findOne(conditions, projection, options, callback) {
if (typeof options === 'function') {
callback = options;
options = null;
} else if (typeof projection === 'function') {
callback = projection;
projection = null;
options = null;
} else if (typeof conditions === 'function') {
callback = conditions;
conditions = {};
projection = null;
options = null;
}
// get the mongodb collection object
const mq = new this.Query({}, {}, this, this.collection);
mq.select(projection);
mq.setOptions(options);
if (this.schema.discriminatorMapping &&
this.schema.discriminatorMapping.isRoot &&
mq.selectedInclusively()) {
mq.select(this.schema.options.discriminatorKey);
}
if (callback) {
callback = this.$wrapCallback(callback);
}
return mq.findOne(conditions, callback);
};
/**
* Estimates the number of documents in the MongoDB collection. Faster than
* using `countDocuments()` for large collections because
* `estimatedDocumentCount()` uses collection metadata rather than scanning
* the entire collection.
*
* ####Example:
*
 *     const numAdventures = await Adventure.estimatedDocumentCount();
*
* @param {Object} [options]
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.estimatedDocumentCount = function estimatedDocumentCount(options, callback) {
// get the mongodb collection object
const mq = new this.Query({}, {}, this, this.collection);
callback = this.$wrapCallback(callback);
return mq.estimatedDocumentCount(options, callback);
};
/**
* Counts number of documents matching `filter` in a database collection.
*
* ####Example:
*
* Adventure.countDocuments({ type: 'jungle' }, function (err, count) {
* console.log('there are %d jungle adventures', count);
* });
*
* If you want to count all documents in a large collection,
* use the [`estimatedDocumentCount()` function](/docs/api.html#model_Model.estimatedDocumentCount)
* instead. If you call `countDocuments({})`, MongoDB will always execute
* a full collection scan and **not** use any indexes.
*
* The `countDocuments()` function is similar to `count()`, but there are a
* [few operators that `countDocuments()` does not support](https://mongodb.github.io/node-mongodb-native/3.1/api/Collection.html#countDocuments).
* Below are the operators that `count()` supports but `countDocuments()` does not,
* and the suggested replacement:
*
* - `$where`: [`$expr`](https://docs.mongodb.com/manual/reference/operator/query/expr/)
* - `$near`: [`$geoWithin`](https://docs.mongodb.com/manual/reference/operator/query/geoWithin/) with [`$center`](https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center)
* - `$nearSphere`: [`$geoWithin`](https://docs.mongodb.com/manual/reference/operator/query/geoWithin/) with [`$centerSphere`](https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere)
*
* @param {Object} filter
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.countDocuments = function countDocuments(conditions, callback) {
if (typeof conditions === 'function') {
callback = conditions;
conditions = {};
}
// get the mongodb collection object
const mq = new this.Query({}, {}, this, this.collection);
callback = this.$wrapCallback(callback);
return mq.countDocuments(conditions, callback);
};
/**
* Counts number of documents that match `filter` in a database collection.
*
* This method is deprecated. If you want to count the number of documents in
* a collection, e.g. `count({})`, use the [`estimatedDocumentCount()` function](/docs/api.html#model_Model.estimatedDocumentCount)
* instead. Otherwise, use the [`countDocuments()`](/docs/api.html#model_Model.countDocuments) function instead.
*
* ####Example:
*
* Adventure.count({ type: 'jungle' }, function (err, count) {
* if (err) ..
* console.log('there are %d jungle adventures', count);
* });
*
* @deprecated
* @param {Object} filter
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.count = function count(conditions, callback) {
if (typeof conditions === 'function') {
callback = conditions;
conditions = {};
}
// get the mongodb collection object
const mq = new this.Query({}, {}, this, this.collection);
if (callback) {
callback = this.$wrapCallback(callback);
}
return mq.count(conditions, callback);
};
/**
* Creates a Query for a `distinct` operation.
*
* Passing a `callback` executes the query.
*
* ####Example
*
* Link.distinct('url', { clicks: {$gt: 100}}, function (err, result) {
* if (err) return handleError(err);
*
* assert(Array.isArray(result));
* console.log('unique urls with more than 100 clicks', result);
* })
*
* var query = Link.distinct('url');
* query.exec(callback);
*
* @param {String} field
* @param {Object} [conditions] optional
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.distinct = function distinct(field, conditions, callback) {
// get the mongodb collection object
const mq = new this.Query({}, {}, this, this.collection);
if (typeof conditions === 'function') {
callback = conditions;
conditions = {};
}
if (callback) {
callback = this.$wrapCallback(callback);
}
return mq.distinct(field, conditions, callback);
};
/**
* Creates a Query, applies the passed conditions, and returns the Query.
*
* For example, instead of writing:
*
* User.find({age: {$gte: 21, $lte: 65}}, callback);
*
* we can instead write:
*
* User.where('age').gte(21).lte(65).exec(callback);
*
* Since the Query class also supports `where` you can continue chaining
*
* User
* .where('age').gte(21).lte(65)
* .where('name', /^b/i)
* ... etc
*
* @param {String} path
* @param {Object} [val] optional value
* @return {Query}
* @api public
*/
Model.where = function where(path, val) {
void val; // eslint
// get the mongodb collection object
const mq = new this.Query({}, {}, this, this.collection).find({});
return mq.where.apply(mq, arguments);
};
/**
* Creates a `Query` and specifies a `$where` condition.
*
* Sometimes you need to query for things in mongodb using a JavaScript expression. You can do so via `find({ $where: javascript })`, or you can use the mongoose shortcut method $where via a Query chain or from your mongoose Model.
*
* Blog.$where('this.username.indexOf("val") !== -1').exec(function (err, docs) {});
*
* @param {String|Function} argument is a javascript string or anonymous function
* @method $where
* @memberOf Model
* @return {Query}
* @see Query.$where #query_Query-%24where
* @api public
*/
Model.$where = function $where() {
const mq = new this.Query({}, {}, this, this.collection).find({});
return mq.$where.apply(mq, arguments);
};
/**
* Issues a mongodb findAndModify update command.
*
* Finds a matching document, updates it according to the `update` arg, passing any `options`, and returns the found document (if any) to the callback. The query executes if `callback` is passed else a Query object is returned.
*
* ####Options:
*
* - `new`: bool - if true, return the modified document rather than the original. defaults to false (changed in 4.0)
* - `upsert`: bool - creates the object if it doesn't exist. defaults to false.
* - `fields`: {Object|String} - Field selection. Equivalent to `.select(fields).findOneAndUpdate()`
* - `maxTimeMS`: puts a time limit on the query - requires mongodb >= 2.6.0
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update
* - `runValidators`: if true, runs [update validators](/docs/validation.html#update-validators) on this command. Update validators validate the update operation against the model's schema.
* - `setDefaultsOnInsert`: if this and `upsert` are true, mongoose will apply the [defaults](http://mongoosejs.com/docs/defaults.html) specified in the model's schema if a new document is created. This option only works on MongoDB >= 2.4 because it relies on [MongoDB's `$setOnInsert` operator](https://docs.mongodb.org/v2.4/reference/operator/update/setOnInsert/).
* - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify)
* - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update
*
* ####Examples:
*
* A.findOneAndUpdate(conditions, update, options, callback) // executes
* A.findOneAndUpdate(conditions, update, options) // returns Query
* A.findOneAndUpdate(conditions, update, callback) // executes
* A.findOneAndUpdate(conditions, update) // returns Query
* A.findOneAndUpdate() // returns Query
*
* ####Note:
*
* All top level update keys which are not `atomic` operation names are treated as set operations:
*
* ####Example:
*
* var query = { name: 'borne' };
* Model.findOneAndUpdate(query, { name: 'jason bourne' }, options, callback)
*
* // is sent as
* Model.findOneAndUpdate(query, { $set: { name: 'jason bourne' }}, options, callback)
*
* This helps prevent accidentally overwriting your document with `{ name: 'jason bourne' }`.
*
* ####Note:
*
* Values are cast to their appropriate types when using the findAndModify helpers.
* However, the below are not executed by default.
*
* - defaults. Use the `setDefaultsOnInsert` option to override.
*
 * `findAndModify` helpers support limited validation. You can
 * enable validation by setting the `runValidators` option.
*
* If you need full-fledged validation, use the traditional approach of first
* retrieving the document.
*
* Model.findById(id, function (err, doc) {
* if (err) ..
* doc.name = 'jason bourne';
* doc.save(callback);
* });
*
* @param {Object} [conditions]
* @param {Object} [update]
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Object} [options.lean] if truthy, mongoose will return the document as a plain JavaScript object rather than a mongoose document. See [`Query.lean()`](http://mongoosejs.com/docs/api.html#query_Query-lean).
* @param {Function} [callback]
* @return {Query}
* @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command
* @api public
*/
Model.findOneAndUpdate = function(conditions, update, options, callback) {
if (typeof options === 'function') {
callback = options;
options = null;
} else if (arguments.length === 1) {
if (typeof conditions === 'function') {
const msg = 'Model.findOneAndUpdate(): First argument must not be a function.\n\n'
+ ' ' + this.modelName + '.findOneAndUpdate(conditions, update, options, callback)\n'
+ ' ' + this.modelName + '.findOneAndUpdate(conditions, update, options)\n'
+ ' ' + this.modelName + '.findOneAndUpdate(conditions, update)\n'
+ ' ' + this.modelName + '.findOneAndUpdate(update)\n'
+ ' ' + this.modelName + '.findOneAndUpdate()\n';
throw new TypeError(msg);
}
update = conditions;
conditions = undefined;
}
if (callback) {
callback = this.$wrapCallback(callback);
}
let fields;
if (options) {
fields = options.fields || options.projection;
}
update = utils.clone(update, {
depopulate: true,
_isNested: true
});
_decorateUpdateWithVersionKey(update, options, this.schema.options.versionKey);
const mq = new this.Query({}, {}, this, this.collection);
mq.select(fields);
return mq.findOneAndUpdate(conditions, update, options, callback);
};
/*!
* Decorate the update with a version key, if necessary
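 *
 * For example (a sketch, assuming the default `__v` version key): an upsert
 * with `{ $set: { name: 'x' } }` becomes
 * `{ $set: { name: 'x' }, $setOnInsert: { __v: 0 } }`, so newly upserted
 * documents start at version 0.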
*/
function _decorateUpdateWithVersionKey(update, options, versionKey) {
if (!versionKey || !get(options, 'upsert', false)) {
return;
}
const updatedPaths = modifiedPaths(update);
if (!updatedPaths[versionKey]) {
if (options.overwrite) {
update[versionKey] = 0;
} else {
if (!update.$setOnInsert) {
update.$setOnInsert = {};
}
update.$setOnInsert[versionKey] = 0;
}
}
}
/**
* Issues a mongodb findAndModify update command by a document's _id field.
* `findByIdAndUpdate(id, ...)` is equivalent to `findOneAndUpdate({ _id: id }, ...)`.
*
* Finds a matching document, updates it according to the `update` arg,
* passing any `options`, and returns the found document (if any) to the
* callback. The query executes if `callback` is passed.
*
* This function triggers the following middleware.
*
* - `findOneAndUpdate()`
*
* ####Options:
*
* - `new`: bool - true to return the modified document rather than the original. defaults to false
* - `upsert`: bool - creates the object if it doesn't exist. defaults to false.
* - `runValidators`: if true, runs [update validators](/docs/validation.html#update-validators) on this command. Update validators validate the update operation against the model's schema.
* - `setDefaultsOnInsert`: if this and `upsert` are true, mongoose will apply the [defaults](http://mongoosejs.com/docs/defaults.html) specified in the model's schema if a new document is created. This option only works on MongoDB >= 2.4 because it relies on [MongoDB's `$setOnInsert` operator](https://docs.mongodb.org/v2.4/reference/operator/update/setOnInsert/).
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update
* - `select`: sets the document fields to return
* - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify)
* - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update
*
* ####Examples:
*
* A.findByIdAndUpdate(id, update, options, callback) // executes
* A.findByIdAndUpdate(id, update, options) // returns Query
* A.findByIdAndUpdate(id, update, callback) // executes
* A.findByIdAndUpdate(id, update) // returns Query
* A.findByIdAndUpdate() // returns Query
*
* ####Note:
*
* All top level update keys which are not `atomic` operation names are treated as set operations:
*
* ####Example:
*
* Model.findByIdAndUpdate(id, { name: 'jason bourne' }, options, callback)
*
* // is sent as
* Model.findByIdAndUpdate(id, { $set: { name: 'jason bourne' }}, options, callback)
*
* This helps prevent accidentally overwriting your document with `{ name: 'jason bourne' }`.
*
* ####Note:
*
* Values are cast to their appropriate types when using the findAndModify helpers.
* However, the below are not executed by default.
*
* - defaults. Use the `setDefaultsOnInsert` option to override.
*
 * `findAndModify` helpers support limited validation. You can
 * enable validation by setting the `runValidators` option.
*
* If you need full-fledged validation, use the traditional approach of first
* retrieving the document.
*
* Model.findById(id, function (err, doc) {
* if (err) ..
* doc.name = 'jason bourne';
* doc.save(callback);
* });
*
* @param {Object|Number|String} id value of `_id` to query by
* @param {Object} [update]
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Object} [options.lean] if truthy, mongoose will return the document as a plain JavaScript object rather than a mongoose document. See [`Query.lean()`](http://mongoosejs.com/docs/api.html#query_Query-lean).
* @param {Function} [callback]
* @return {Query}
* @see Model.findOneAndUpdate #model_Model.findOneAndUpdate
* @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command
* @api public
*/
Model.findByIdAndUpdate = function(id, update, options, callback) {
if (callback) {
callback = this.$wrapCallback(callback);
}
if (arguments.length === 1) {
if (typeof id === 'function') {
const msg = 'Model.findByIdAndUpdate(): First argument must not be a function.\n\n'
+ ' ' + this.modelName + '.findByIdAndUpdate(id, callback)\n'
+ ' ' + this.modelName + '.findByIdAndUpdate(id)\n'
+ ' ' + this.modelName + '.findByIdAndUpdate()\n';
throw new TypeError(msg);
}
return this.findOneAndUpdate({_id: id}, undefined);
}
// if a model is passed in instead of an id
if (id instanceof Document) {
id = id._id;
}
return this.findOneAndUpdate.call(this, {_id: id}, update, options, callback);
};
/**
* Issue a MongoDB `findOneAndDelete()` command.
*
* Finds a matching document, removes it, and passes the found document
* (if any) to the callback.
*
* Executes the query if `callback` is passed.
*
* This function triggers the following middleware.
*
* - `findOneAndDelete()`
*
* This function differs slightly from `Model.findOneAndRemove()` in that
* `findOneAndRemove()` becomes a [MongoDB `findAndModify()` command](https://docs.mongodb.com/manual/reference/method/db.collection.findAndModify/),
* as opposed to a `findOneAndDelete()` command. For most mongoose use cases,
* this distinction is purely pedantic. You should use `findOneAndDelete()`
* unless you have a good reason not to.
*
* ####Options:
*
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update
* - `maxTimeMS`: puts a time limit on the query - requires mongodb >= 2.6.0
* - `select`: sets the document fields to return
* - `projection`: like select, it determines which fields to return, ex. `{ projection: { _id: 0 } }`
* - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify)
* - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update
*
* ####Examples:
*
* A.findOneAndDelete(conditions, options, callback) // executes
* A.findOneAndDelete(conditions, options) // return Query
* A.findOneAndDelete(conditions, callback) // executes
* A.findOneAndDelete(conditions) // returns Query
* A.findOneAndDelete() // returns Query
*
* Values are cast to their appropriate types when using the findAndModify helpers.
* However, the below are not executed by default.
*
* - defaults. Use the `setDefaultsOnInsert` option to override.
*
 * `findAndModify` helpers support limited validation. You can
 * enable validation by setting the `runValidators` option.
*
* If you need full-fledged validation, use the traditional approach of first
* retrieving the document.
*
* Model.findById(id, function (err, doc) {
* if (err) ..
* doc.name = 'jason bourne';
* doc.save(callback);
* });
*
* @param {Object} conditions
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.findOneAndDelete = function(conditions, options, callback) {
if (arguments.length === 1 && typeof conditions === 'function') {
const msg = 'Model.findOneAndDelete(): First argument must not be a function.\n\n'
+ ' ' + this.modelName + '.findOneAndDelete(conditions, callback)\n'
+ ' ' + this.modelName + '.findOneAndDelete(conditions)\n'
+ ' ' + this.modelName + '.findOneAndDelete()\n';
throw new TypeError(msg);
}
if (typeof options === 'function') {
callback = options;
options = undefined;
}
if (callback) {
callback = this.$wrapCallback(callback);
}
let fields;
if (options) {
fields = options.select;
options.select = undefined;
}
const mq = new this.Query({}, {}, this, this.collection);
mq.select(fields);
return mq.findOneAndDelete(conditions, options, callback);
};
/**
* Issue a MongoDB `findOneAndDelete()` command by a document's _id field.
* In other words, `findByIdAndDelete(id)` is a shorthand for
* `findOneAndDelete({ _id: id })`.
*
* This function triggers the following middleware.
*
* - `findOneAndDelete()`
*
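 * The sketch below shows typical usage; the `Character` model, the `id` value,
 * and the `handleError` helper are assumed for illustration.
 *
 * ####Example:
 *
 *     Character.findByIdAndDelete(id, function(err, doc) {
 *       if (err) return handleError(err);
 *       console.log(doc); // the deleted document, or null if nothing matched
 *     });
 *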
* @param {Object|Number|String} id value of `_id` to query by
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @see Model.findOneAndRemove #model_Model.findOneAndRemove
* @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command
*/
Model.findByIdAndDelete = function(id, options, callback) {
if (arguments.length === 1 && typeof id === 'function') {
const msg = 'Model.findByIdAndDelete(): First argument must not be a function.\n\n'
+ ' ' + this.modelName + '.findByIdAndDelete(id, callback)\n'
+ ' ' + this.modelName + '.findByIdAndDelete(id)\n'
+ ' ' + this.modelName + '.findByIdAndDelete()\n';
throw new TypeError(msg);
}
if (callback) {
callback = this.$wrapCallback(callback);
}
return this.findOneAndDelete({_id: id}, options, callback);
};
/**
* Issue a MongoDB `findOneAndReplace()` command.
*
* Finds a matching document, replaces it with the provided doc, and passes the
* returned doc to the callback.
*
* Executes the query if `callback` is passed.
*
* This function triggers the following query middleware.
*
* - `findOneAndReplace()`
*
* ####Options:
*
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update
* - `maxTimeMS`: puts a time limit on the query - requires mongodb >= 2.6.0
* - `select`: sets the document fields to return
* - `projection`: like select, it determines which fields to return, ex. `{ projection: { _id: 0 } }`
* - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify)
* - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update
*
* ####Examples:
*
* A.findOneAndReplace(conditions, options, callback) // executes
* A.findOneAndReplace(conditions, options) // return Query
* A.findOneAndReplace(conditions, callback) // executes
* A.findOneAndReplace(conditions) // returns Query
* A.findOneAndReplace() // returns Query
*
* Values are cast to their appropriate types when using the findAndModify helpers.
* However, the below are not executed by default.
*
* - defaults. Use the `setDefaultsOnInsert` option to override.
*
* @param {Object} conditions
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.findOneAndReplace = function(conditions, options, callback) {
if (arguments.length === 1 && typeof conditions === 'function') {
    const msg = 'Model.findOneAndReplace(): First argument must not be a function.\n\n'
      + '  ' + this.modelName + '.findOneAndReplace(conditions, callback)\n'
      + '  ' + this.modelName + '.findOneAndReplace(conditions)\n'
      + '  ' + this.modelName + '.findOneAndReplace()\n';
throw new TypeError(msg);
}
if (typeof options === 'function') {
callback = options;
options = undefined;
}
if (callback) {
callback = this.$wrapCallback(callback);
}
let fields;
if (options) {
fields = options.select;
options.select = undefined;
}
const mq = new this.Query({}, {}, this, this.collection);
mq.select(fields);
return mq.findOneAndReplace(conditions, options, callback);
};
/**
* Issue a mongodb findAndModify remove command.
*
* Finds a matching document, removes it, passing the found document (if any) to the callback.
*
* Executes the query if `callback` is passed.
*
* This function triggers the following middleware.
*
* - `findOneAndRemove()`
*
* ####Options:
*
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update
* - `maxTimeMS`: puts a time limit on the query - requires mongodb >= 2.6.0
* - `select`: sets the document fields to return
* - `projection`: like select, it determines which fields to return, ex. `{ projection: { _id: 0 } }`
* - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify)
* - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update
*
* ####Examples:
*
* A.findOneAndRemove(conditions, options, callback) // executes
* A.findOneAndRemove(conditions, options) // return Query
* A.findOneAndRemove(conditions, callback) // executes
* A.findOneAndRemove(conditions) // returns Query
* A.findOneAndRemove() // returns Query
*
* Values are cast to their appropriate types when using the findAndModify helpers.
* However, the below are not executed by default.
*
* - defaults. Use the `setDefaultsOnInsert` option to override.
*
 * `findAndModify` helpers support limited validation. You can
 * enable validation by setting the `runValidators` option.
*
* If you need full-fledged validation, use the traditional approach of first
* retrieving the document.
*
* Model.findById(id, function (err, doc) {
* if (err) ..
* doc.name = 'jason bourne';
* doc.save(callback);
* });
*
* @param {Object} conditions
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command
* @api public
*/
Model.findOneAndRemove = function(conditions, options, callback) {
if (arguments.length === 1 && typeof conditions === 'function') {
const msg = 'Model.findOneAndRemove(): First argument must not be a function.\n\n'
+ ' ' + this.modelName + '.findOneAndRemove(conditions, callback)\n'
+ ' ' + this.modelName + '.findOneAndRemove(conditions)\n'
+ ' ' + this.modelName + '.findOneAndRemove()\n';
throw new TypeError(msg);
}
if (typeof options === 'function') {
callback = options;
options = undefined;
}
if (callback) {
callback = this.$wrapCallback(callback);
}
let fields;
if (options) {
fields = options.select;
options.select = undefined;
}
const mq = new this.Query({}, {}, this, this.collection);
mq.select(fields);
return mq.findOneAndRemove(conditions, options, callback);
};
/**
* Issue a mongodb findAndModify remove command by a document's _id field. `findByIdAndRemove(id, ...)` is equivalent to `findOneAndRemove({ _id: id }, ...)`.
*
* Finds a matching document, removes it, passing the found document (if any) to the callback.
*
* Executes the query if `callback` is passed.
*
* This function triggers the following middleware.
*
* - `findOneAndRemove()`
*
* ####Options:
*
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update
* - `select`: sets the document fields to return
* - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify)
* - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update
*
* ####Examples:
*
* A.findByIdAndRemove(id, options, callback) // executes
* A.findByIdAndRemove(id, options) // return Query
* A.findByIdAndRemove(id, callback) // executes
* A.findByIdAndRemove(id) // returns Query
* A.findByIdAndRemove() // returns Query
*
* @param {Object|Number|String} id value of `_id` to query by
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @see Model.findOneAndRemove #model_Model.findOneAndRemove
* @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command
*/
Model.findByIdAndRemove = function(id, options, callback) {
if (arguments.length === 1 && typeof id === 'function') {
const msg = 'Model.findByIdAndRemove(): First argument must not be a function.\n\n'
+ ' ' + this.modelName + '.findByIdAndRemove(id, callback)\n'
+ ' ' + this.modelName + '.findByIdAndRemove(id)\n'
+ ' ' + this.modelName + '.findByIdAndRemove()\n';
throw new TypeError(msg);
}
if (callback) {
callback = this.$wrapCallback(callback);
}
return this.findOneAndRemove({_id: id}, options, callback);
};
/**
* Shortcut for saving one or more documents to the database.
* `MyModel.create(docs)` does `new MyModel(doc).save()` for every doc in
* docs.
*
* This function triggers the following middleware.
*
* - `save()`
*
* ####Example:
*
* // pass a spread of docs and a callback
* Candy.create({ type: 'jelly bean' }, { type: 'snickers' }, function (err, jellybean, snickers) {
* if (err) // ...
* });
*
* // pass an array of docs
* var array = [{ type: 'jelly bean' }, { type: 'snickers' }];
* Candy.create(array, function (err, candies) {
* if (err) // ...
*
* var jellybean = candies[0];
* var snickers = candies[1];
* // ...
* });
*
* // callback is optional; use the returned promise if you like:
* var promise = Candy.create({ type: 'jawbreaker' });
* promise.then(function (jawbreaker) {
* // ...
* })
*
* @param {Array|Object} docs Documents to insert, as a spread or array
* @param {Object} [options] Options passed down to `save()`. To specify `options`, `docs` **must** be an array, not a spread.
* @param {Function} [callback] callback
* @return {Promise}
* @api public
*/
Model.create = function create(doc, options, callback) {
let args;
let cb;
const discriminatorKey = this.schema.options.discriminatorKey;
if (Array.isArray(doc)) {
args = doc;
cb = typeof options === 'function' ? options : callback;
options = options != null && typeof options === 'object' ? options : {};
} else {
const last = arguments[arguments.length - 1];
options = {};
// Handle falsy callbacks re: #5061
if (typeof last === 'function' || !last) {
cb = last;
args = utils.args(arguments, 0, arguments.length - 1);
} else {
args = utils.args(arguments);
}
}
if (cb) {
cb = this.$wrapCallback(cb);
}
return utils.promiseOrCallback(cb, cb => {
if (args.length === 0) {
return cb(null);
}
const toExecute = [];
let firstError;
args.forEach(doc => {
toExecute.push(callback => {
const Model = this.discriminators && doc[discriminatorKey] != null ?
this.discriminators[doc[discriminatorKey]] || getDiscriminatorByValue(this, doc[discriminatorKey]) :
this;
if (Model == null) {
throw new Error(`Discriminator "${doc[discriminatorKey]}" not ` +
`found for model "${this.modelName}"`);
}
let toSave = doc;
const callbackWrapper = (error, doc) => {
if (error) {
if (!firstError) {
firstError = error;
}
return callback(null, { error: error });
}
callback(null, { doc: doc });
};
if (!(toSave instanceof Model)) {
try {
toSave = new Model(toSave);
} catch (error) {
return callbackWrapper(error);
}
}
toSave.save(options, callbackWrapper);
});
});
parallel(toExecute, (error, res) => {
const savedDocs = [];
const len = res.length;
for (let i = 0; i < len; ++i) {
if (res[i].doc) {
savedDocs.push(res[i].doc);
}
}
if (firstError) {
return cb(firstError, savedDocs);
}
if (doc instanceof Array) {
cb(null, savedDocs);
} else {
cb.apply(this, [null].concat(savedDocs));
}
});
}, this.events);
};
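/*
 * A short sketch of passing `options` to `create()` (per the note above,
 * `docs` must be an array for `options` to be honored). `Candy` is the
 * hypothetical model from the examples above; a connected mongoose instance
 * and a MongoDB >= 3.6 replica set are assumed for sessions.
 *
 *     const session = await Candy.startSession();
 *     const [doc] = await Candy.create([{ type: 'gummy bear' }], { session: session });
 */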
/**
* _Requires a replica set running MongoDB >= 3.6.0._ Watches the
* underlying collection for changes using
* [MongoDB change streams](https://docs.mongodb.com/manual/changeStreams/).
*
* This function does **not** trigger any middleware. In particular, it
* does **not** trigger aggregate middleware.
*
* The ChangeStream object is an event emitter that emits the following events:
*
* - 'change': A change occurred, see below example
* - 'error': An unrecoverable error occurred. In particular, change streams currently error out if they lose connection to the replica set primary. Follow [this GitHub issue](https://github.com/Automattic/mongoose/issues/6799) for updates.
* - 'end': Emitted if the underlying stream is closed
* - 'close': Emitted if the underlying stream is closed
*
* ####Example:
*
* const doc = await Person.create({ name: 'Ned Stark' });
* const changeStream = Person.watch().on('change', change => console.log(change));
* // Will print from the above `console.log()`:
* // { _id: { _data: ... },
* // operationType: 'delete',
* // ns: { db: 'mydb', coll: 'Person' },
* // documentKey: { _id: 5a51b125c5500f5aa094c7bd } }
* await doc.remove();
*
* @param {Array} [pipeline]
* @param {Object} [options] see the [mongodb driver options](http://mongodb.github.io/node-mongodb-native/3.0/api/Collection.html#watch)
* @return {ChangeStream} mongoose-specific change stream wrapper, inherits from EventEmitter
* @api public
*/
Model.watch = function(pipeline, options) {
return new ChangeStream(this, pipeline, options);
};
/**
* _Requires MongoDB >= 3.6.0._ Starts a [MongoDB session](https://docs.mongodb.com/manual/release-notes/3.6/#client-sessions)
* for benefits like causal consistency, [retryable writes](https://docs.mongodb.com/manual/core/retryable-writes/),
* and [transactions](http://thecodebarbarian.com/a-node-js-perspective-on-mongodb-4-transactions.html).
*
* Calling `MyModel.startSession()` is equivalent to calling `MyModel.db.startSession()`.
*
* This function does not trigger any middleware.
*
* ####Example:
*
* const session = await Person.startSession();
* let doc = await Person.findOne({ name: 'Ned Stark' }, null, { session });
* await doc.remove();
* // `doc` will always be null, even if reading from a replica set
* // secondary. Without causal consistency, it is possible to
* // get a doc back from the below query if the query reads from a
* // secondary that is experiencing replication lag.
* doc = await Person.findOne({ name: 'Ned Stark' }, null, { session, readPreference: 'secondary' });
*
* @param {Object} [options] see the [mongodb driver options](http://mongodb.github.io/node-mongodb-native/3.0/api/MongoClient.html#startSession)
* @param {Boolean} [options.causalConsistency=true] set to false to disable causal consistency
* @param {Function} [callback]
* @return {Promise<ClientSession>} promise that resolves to a MongoDB driver `ClientSession`
* @api public
*/
Model.startSession = function() {
return this.db.startSession.apply(this.db, arguments);
};
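/*
 * A sketch of using the returned driver session for a transaction, assuming
 * MongoDB >= 4.0 on a replica set; `Person` is the hypothetical model from
 * the example above:
 *
 *     const session = await Person.startSession();
 *     session.startTransaction();
 *     try {
 *       await Person.create([{ name: 'Arya Stark' }], { session: session });
 *       await session.commitTransaction();
 *     } catch (error) {
 *       await session.abortTransaction();
 *       throw error;
 *     } finally {
 *       session.endSession();
 *     }
 */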
/**
* Shortcut for validating an array of documents and inserting them into
* MongoDB if they're all valid. This function is faster than `.create()`
* because it only sends one operation to the server, rather than one for each
* document.
*
* Mongoose always validates each document **before** sending `insertMany`
* to MongoDB. So if one document has a validation error, no documents will
* be saved, unless you set
* [the `ordered` option to false](https://docs.mongodb.com/manual/reference/method/db.collection.insertMany/#error-handling).
*
* This function does **not** trigger save middleware.
*
* This function triggers the following middleware.
*
* - `insertMany()`
*
* ####Example:
*
* var arr = [{ name: 'Star Wars' }, { name: 'The Empire Strikes Back' }];
* Movies.insertMany(arr, function(error, docs) {});
*
* @param {Array|Object|*} doc(s)
* @param {Object} [options] see the [mongodb driver options](http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#insertMany)
* @param {Boolean} [options.ordered = true] if true, will fail fast on the first error encountered. If false, will insert all the documents it can and report errors later. An `insertMany()` with `ordered = false` is called an "unordered" `insertMany()`.
* @param {Boolean} [options.rawResult = false] if false, the returned promise resolves to the documents that passed mongoose document validation. If `true`, will return the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#~insertWriteOpCallback) with a `mongoose` property that contains `validationErrors` if this is an unordered `insertMany`.
* @param {Function} [callback] callback
* @return {Promise}
* @api public
*/
Model.insertMany = function(arr, options, callback) {
if (typeof options === 'function') {
callback = options;
options = null;
}
return utils.promiseOrCallback(callback, cb => {
this.$__insertMany(arr, options, cb);
}, this.events);
};
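/*
 * A sketch of the `ordered` and `rawResult` options described above.
 * `Movies` is the hypothetical model from the example, with `name` assumed
 * to be a required path so the second document fails validation:
 *
 *     Movies.insertMany(
 *       [{ name: 'The Phantom Menace' }, { name: null }],
 *       { ordered: false, rawResult: true },
 *       function (error, res) {
 *         // valid docs are still inserted; failed validations are reported
 *         // on `res.mongoose.validationErrors`
 *       });
 */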
/*!
* ignore
*/
Model.$__insertMany = function(arr, options, callback) {
const _this = this;
if (typeof options === 'function') {
callback = options;
options = null;
}
if (callback) {
callback = this.$wrapCallback(callback);
}
callback = callback || utils.noop;
options = options || {};
const limit = get(options, 'limit', 1000);
const rawResult = get(options, 'rawResult', false);
const ordered = get(options, 'ordered', true);
if (!Array.isArray(arr)) {
arr = [arr];
}
const toExecute = [];
const validationErrors = [];
arr.forEach(function(doc) {
toExecute.push(function(callback) {
if (!(doc instanceof _this)) {
doc = new _this(doc);
}
doc.validate({ __noPromise: true }, function(error) {
if (error) {
// Option `ordered` signals that insert should be continued after reaching
// a failing insert. Therefore we delegate "null", meaning the validation
// failed. It's up to the next function to filter out all failed models
if (ordered === false) {
validationErrors.push(error);
return callback(null, null);
}
return callback(error);
}
callback(null, doc);
});
});
});
parallelLimit(toExecute, limit, function(error, docs) {
if (error) {
callback(error, null);
return;
}
// We filter all failed pre-validations by removing nulls
const docAttributes = docs.filter(function(doc) {
return doc != null;
});
// Quickly escape while there aren't any valid docAttributes
if (docAttributes.length < 1) {
callback(null, []);
return;
}
const docObjects = docAttributes.map(function(doc) {
if (doc.schema.options.versionKey) {
doc[doc.schema.options.versionKey] = 0;
}
if (doc.initializeTimestamps) {
return doc.initializeTimestamps().toObject(internalToObjectOptions);
}
return doc.toObject(internalToObjectOptions);
});
_this.collection.insertMany(docObjects, options, function(error, res) {
if (error) {
callback(error, null);
return;
}
for (let i = 0; i < docAttributes.length; ++i) {
docAttributes[i].isNew = false;
docAttributes[i].emit('isNew', false);
docAttributes[i].constructor.emit('isNew', false);
}
if (rawResult) {
if (ordered === false) {
        // Decorate with mongoose validation errors in case of unordered,
        // because the `insertMany()` still ran for the documents that
        // passed validation
res.mongoose = {
validationErrors: validationErrors
};
}
return callback(null, res);
}
callback(null, docAttributes);
});
});
};
/**
* Sends multiple `insertOne`, `updateOne`, `updateMany`, `replaceOne`,
* `deleteOne`, and/or `deleteMany` operations to the MongoDB server in one
 * command. This is faster than sending multiple independent operations (like
 * if you use `create()`) because with `bulkWrite()` there is only one round
* trip to MongoDB.
*
* Mongoose will perform casting on all operations you provide.
*
* This function does **not** trigger any middleware, not `save()` nor `update()`.
* If you need to trigger
* `save()` middleware for every document use [`create()`](http://mongoosejs.com/docs/api.html#model_Model.create) instead.
*
* ####Example:
*
* Character.bulkWrite([
* {
* insertOne: {
* document: {
* name: 'Eddard Stark',
* title: 'Warden of the North'
* }
* }
* },
* {
* updateOne: {
* filter: { name: 'Eddard Stark' },
* // If you were using the MongoDB driver directly, you'd need to do
* // `update: { $set: { title: ... } }` but mongoose adds $set for
* // you.
* update: { title: 'Hand of the King' }
* }
* },
* {
 *         deleteOne: {
 *           filter: { name: 'Eddard Stark' }
 *         }
* }
* ]).then(res => {
* // Prints "1 1 1"
* console.log(res.insertedCount, res.modifiedCount, res.deletedCount);
* });
*
* @param {Array} ops
* @param {Object} [options]
* @param {Function} [callback] callback `function(error, bulkWriteOpResult) {}`
* @return {Promise} resolves to a [`BulkWriteOpResult`](http://mongodb.github.io/node-mongodb-native/3.1/api/Collection.html#~BulkWriteOpResult) if the operation succeeds
* @api public
*/
Model.bulkWrite = function(ops, options, callback) {
if (typeof options === 'function') {
callback = options;
options = null;
}
if (callback) {
callback = this.$wrapCallback(callback);
}
options = options || {};
const validations = ops.map(op => castBulkWrite(this, op));
return utils.promiseOrCallback(callback, cb => {
parallel(validations, error => {
if (error) {
return cb(error);
}
this.collection.bulkWrite(ops, options, (error, res) => {
if (error) {
return cb(error);
}
cb(null, res);
});
});
}, this.events);
};
/**
* Shortcut for creating a new Document from existing raw data, pre-saved in the DB.
* The document returned has no paths marked as modified initially.
*
* ####Example:
*
* // hydrate previous data into a Mongoose document
* var mongooseCandy = Candy.hydrate({ _id: '54108337212ffb6d459f854c', type: 'jelly bean' });
*
* @param {Object} obj
* @return {Model} document instance
* @api public
*/
Model.hydrate = function(obj) {
const model = require('./queryhelpers').createModel(this, obj);
model.init(obj);
return model;
};
/**
* Updates one document in the database without returning it.
*
* This function triggers the following middleware.
*
* - `update()`
*
* ####Examples:
*
* MyModel.update({ age: { $gt: 18 } }, { oldEnough: true }, fn);
* MyModel.update({ name: 'Tobi' }, { ferret: true }, { multi: true }, function (err, raw) {
* if (err) return handleError(err);
* console.log('The raw response from Mongo was ', raw);
* });
*
* ####Valid options:
*
* - `safe` (boolean) safe mode (defaults to value set in schema (true))
* - `upsert` (boolean) whether to create the doc if it doesn't match (false)
* - `multi` (boolean) whether multiple documents should be updated (false)
* - `runValidators`: if true, runs [update validators](/docs/validation.html#update-validators) on this command. Update validators validate the update operation against the model's schema.
* - `setDefaultsOnInsert`: if this and `upsert` are true, mongoose will apply the [defaults](http://mongoosejs.com/docs/defaults.html) specified in the model's schema if a new document is created. This option only works on MongoDB >= 2.4 because it relies on [MongoDB's `$setOnInsert` operator](https://docs.mongodb.org/v2.4/reference/operator/update/setOnInsert/).
* - `strict` (boolean) overrides the `strict` option for this update
* - `overwrite` (boolean) disables update-only mode, allowing you to overwrite the doc (false)
*
* All `update` values are cast to their appropriate SchemaTypes before being sent.
*
* The `callback` function receives `(err, rawResponse)`.
*
* - `err` is the error if any occurred
* - `rawResponse` is the full response from Mongo
*
* ####Note:
*
* All top level keys which are not `atomic` operation names are treated as set operations:
*
* ####Example:
*
* var query = { name: 'borne' };
* Model.update(query, { name: 'jason bourne' }, options, callback)
*
* // is sent as
* Model.update(query, { $set: { name: 'jason bourne' }}, options, callback)
* // if overwrite option is false. If overwrite is true, sent without the $set wrapper.
*
* This helps prevent accidentally overwriting all documents in your collection with `{ name: 'jason bourne' }`.
*
* ####Note:
*
* Be careful to not use an existing model instance for the update clause (this won't work and can cause weird behavior like infinite loops). Also, ensure that the update clause does not have an _id property, which causes Mongo to return a "Mod on _id not allowed" error.
*
* ####Note:
*
* Although values are casted to their appropriate types when using update, the following are *not* applied:
*
* - defaults
* - setters
* - validators
* - middleware
*
* If you need those features, use the traditional approach of first retrieving the document.
*
* Model.findOne({ name: 'borne' }, function (err, doc) {
* if (err) ..
* doc.name = 'jason bourne';
* doc.save(callback);
* })
*
* @see strict http://mongoosejs.com/docs/guide.html#strict
* @see response http://docs.mongodb.org/v2.6/reference/command/update/#output
* @param {Object} conditions
* @param {Object} doc
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.update = function update(conditions, doc, options, callback) {
return _update(this, 'update', conditions, doc, options, callback);
};
/**
* Same as `update()`, except MongoDB will update _all_ documents that match
* `criteria` (as opposed to just the first one) regardless of the value of
* the `multi` option.
*
* **Note** updateMany will _not_ fire update middleware. Use `pre('updateMany')`
* and `post('updateMany')` instead.
*
* This function triggers the following middleware.
*
* - `updateMany()`
*
* @param {Object} conditions
* @param {Object} doc
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.updateMany = function updateMany(conditions, doc, options, callback) {
return _update(this, 'updateMany', conditions, doc, options, callback);
};
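/*
 * A minimal sketch, reusing the hypothetical `MyModel` from the `update()`
 * examples above:
 *
 *     MyModel.updateMany({ age: { $gte: 18 } }, { oldEnough: true }, function (err, raw) {
 *       if (err) return handleError(err);
 *       // `raw.n` documents matched, `raw.nModified` were changed
 *     });
 */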
/**
* Same as `update()`, except it does not support the `multi` or `overwrite`
* options.
*
* - MongoDB will update _only_ the first document that matches `criteria` regardless of the value of the `multi` option.
* - Use `replaceOne()` if you want to overwrite an entire document rather than using atomic operators like `$set`.
*
* This function triggers the following middleware.
*
* - `updateOne()`
*
* @param {Object} conditions
* @param {Object} doc
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.updateOne = function updateOne(conditions, doc, options, callback) {
return _update(this, 'updateOne', conditions, doc, options, callback);
};
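/*
 * A minimal sketch (same hypothetical `MyModel` as above); only the first
 * matching document is updated, regardless of the `multi` option:
 *
 *     MyModel.updateOne({ name: 'Tobi' }, { ferret: true }, function (err, raw) {
 *       if (err) return handleError(err);
 *     });
 */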
/**
 * Same as `update()`, except MongoDB will replace the existing document with the
* given document (no atomic operators like `$set`).
*
* This function triggers the following middleware.
*
* - `replaceOne()`
*
* @param {Object} conditions
* @param {Object} doc
* @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions)
* @param {Function} [callback]
* @return {Query}
* @api public
*/
Model.replaceOne = function replaceOne(conditions, doc, options, callback) {
const versionKey = get(this, 'schema.options.versionKey', null);
if (versionKey && !doc[versionKey]) {
doc[versionKey] = 0;
}
return _update(this, 'replaceOne', conditions, doc, options, callback);
};
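/*
 * A minimal sketch (same hypothetical `MyModel` as above); the second
 * argument is the full replacement document rather than update operators:
 *
 *     MyModel.replaceOne({ name: 'Tobi' }, { name: 'Tobi', ferret: true }, function (err, raw) {
 *       if (err) return handleError(err);
 *     });
 */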
/*!
* Common code for `updateOne()`, `updateMany()`, `replaceOne()`, and `update()`
* because they need to do the same thing
*/
function _update(model, op, conditions, doc, options, callback) {
const mq = new model.Query({}, {}, model, model.collection);
if (callback) {
callback = model.$wrapCallback(callback);
}
// gh-2406
// make local deep copy of conditions
if (conditions instanceof Document) {
conditions = conditions.toObject();
} else {
conditions = utils.clone(conditions);
}
options = typeof options === 'function' ? options : utils.clone(options);
const versionKey = get(model, 'schema.options.versionKey', null);
_decorateUpdateWithVersionKey(doc, options, versionKey);
return mq[op](conditions, doc, options, callback);
}
/**
* Executes a mapReduce command.
*
* `o` is an object specifying all mapReduce options as well as the map and reduce functions. All options are delegated to the driver implementation. See [node-mongodb-native mapReduce() documentation](http://mongodb.github.io/node-mongodb-native/api-generated/collection.html#mapreduce) for more detail about options.
*
* This function does not trigger any middleware.
*
* ####Example:
*
* var o = {};
* // `map()` and `reduce()` are run on the MongoDB server, not Node.js,
* // these functions are converted to strings
* o.map = function () { emit(this.name, 1) };
* o.reduce = function (k, vals) { return vals.length };
* User.mapReduce(o, function (err, results) {
* console.log(results)
* })
*
* ####Other options:
*
* - `query` {Object} query filter object.
* - `sort` {Object} sort input objects using this key
* - `limit` {Number} max number of documents
* - `keeptemp` {Boolean, default:false} keep temporary data
* - `finalize` {Function} finalize function
* - `scope` {Object} scope variables exposed to map/reduce/finalize during execution
* - `jsMode` {Boolean, default:false} it is possible to make the execution stay in JS. Provided in MongoDB > 2.0.X
* - `verbose` {Boolean, default:false} provide statistics on job execution time.
* - `readPreference` {String}
* - `out*` {Object, default: {inline:1}} sets the output target for the map reduce job.
*
* ####* out options:
*
* - `{inline:1}` the results are returned in an array
* - `{replace: 'collectionName'}` add the results to collectionName: the results replace the collection
* - `{reduce: 'collectionName'}` add the results to collectionName: if dups are detected, uses the reducer / finalize functions
* - `{merge: 'collectionName'}` add the results to collectionName: if dups exist the new docs overwrite the old
*
* If `options.out` is set to `replace`, `merge`, or `reduce`, a Model instance is returned that can be used for further querying. Queries run against this model are all executed with the `lean` option; meaning only the js object is returned and no Mongoose magic is applied (getters, setters, etc).
*
* ####Example:
*
* var o = {};
* // You can also define `map()` and `reduce()` as strings if your
* // linter complains about `emit()` not being defined
* o.map = 'function () { emit(this.name, 1) }';
* o.reduce = 'function (k, vals) { return vals.length }';
* o.out = { replace: 'createdCollectionNameForResults' }
* o.verbose = true;
*
* User.mapReduce(o, function (err, model, stats) {
* console.log('map reduce took %d ms', stats.processtime)
* model.find().where('value').gt(10).exec(function (err, docs) {
* console.log(docs);
* });
* })
*
* // `mapReduce()` returns a promise. However, ES6 promises can only
 *     // resolve to exactly one value, so set `resolveToObject` to make the
 *     // returned promise resolve to an object with `model` and `stats`
 *     // properties, as shown below.
 *     o.resolveToObject = true;
* var promise = User.mapReduce(o);
* promise.then(function (res) {
* var model = res.model;
* var stats = res.stats;
* console.log('map reduce took %d ms', stats.processtime)
* return model.find().where('value').gt(10).exec();
* }).then(function (docs) {
* console.log(docs);
* }).then(null, handleError).end()
*
* @param {Object} o an object specifying map-reduce options
* @param {Function} [callback] optional callback
* @see http://www.mongodb.org/display/DOCS/MapReduce
* @return {Promise}
* @api public
*/
Model.mapReduce = function mapReduce(o, callback) {
if (callback) {
callback = this.$wrapCallback(callback);
}
return utils.promiseOrCallback(callback, cb => {
if (!Model.mapReduce.schema) {
const opts = {noId: true, noVirtualId: true, strict: false};
Model.mapReduce.schema = new Schema({}, opts);
}
if (!o.out) o.out = {inline: 1};
if (o.verbose !== false) o.verbose = true;
o.map = String(o.map);
o.reduce = String(o.reduce);
if (o.query) {
let q = new this.Query(o.query);
q.cast(this);
o.query = q._conditions;
q = undefined;
}
this.collection.mapReduce(null, null, o, (err, res) => {
if (err) {
return cb(err);
}
if (res.collection) {
// returned a collection, convert to Model
const model = Model.compile('_mapreduce_' + res.collection.collectionName,
Model.mapReduce.schema, res.collection.collectionName, this.db,
this.base);
model._mapreduce = true;
res.model = model;
return cb(null, res);
}
cb(null, res);
});
}, this.events);
};
/**
* Performs [aggregations](http://docs.mongodb.org/manual/applications/aggregation/) on the models collection.
*
* If a `callback` is passed, the `aggregate` is executed and a `Promise` is returned. If a callback is not passed, the `aggregate` itself is returned.
*
* This function triggers the following middleware.
*
* - `aggregate()`
*
* ####Example:
*
* // Find the max balance of all accounts
* Users.aggregate([
* { $group: { _id: null, maxBalance: { $max: '$balance' }}},
* { $project: { _id: 0, maxBalance: 1 }}
* ]).
* then(function (res) {
* console.log(res); // [ { maxBalance: 98000 } ]
* });
*
* // Or use the aggregation pipeline builder.
* Users.aggregate().
* group({ _id: null, maxBalance: { $max: '$balance' } }).
* project('-id maxBalance').
* exec(function (err, res) {
* if (err) return handleError(err);
* console.log(res); // [ { maxBalance: 98 } ]
* });
*
* ####NOTE:
*
* - Arguments are not cast to the model's schema because `$project` operators allow redefining the "shape" of the documents at any stage of the pipeline, which may leave documents in an incompatible format.
* - The documents returned are plain javascript objects, not mongoose documents (since any shape of document can be returned).
* - Requires MongoDB >= 2.1
*
* @see Aggregate #aggregate_Aggregate
* @see MongoDB http://docs.mongodb.org/manual/applications/aggregation/
* @param {Array} [pipeline] aggregation pipeline as an array of objects
* @param {Function} [callback]
* @return {Aggregate}
* @api public
*/
Model.aggregate = function aggregate(pipeline, callback) {
if (arguments.length > 2 || get(pipeline, 'constructor.name') === 'Object') {
throw new Error('Mongoose 5.x disallows passing a spread of operators ' +
'to `Model.aggregate()`. Instead of ' +
'`Model.aggregate({ $match }, { $skip })`, do ' +
'`Model.aggregate([{ $match }, { $skip }])`');
}
if (typeof pipeline === 'function') {
callback = pipeline;
pipeline = [];
}
const aggregate = new Aggregate(pipeline || []);
aggregate.model(this);
if (typeof callback === 'undefined') {
return aggregate;
}
if (callback) {
callback = this.$wrapCallback(callback);
}
aggregate.exec(callback);
return aggregate;
};
/**
* Implements `$geoSearch` functionality for Mongoose
*
* This function does not trigger any middleware
*
* ####Example:
*
* var options = { near: [10, 10], maxDistance: 5 };
* Locations.geoSearch({ type : "house" }, options, function(err, res) {
* console.log(res);
* });
*
* ####Options:
* - `near` {Array} x,y point to search for
* - `maxDistance` {Number} the maximum distance from the point near that a result can be
* - `limit` {Number} The maximum number of results to return
* - `lean` {Boolean} return the raw object instead of the Mongoose Model
*
* @param {Object} conditions an object that specifies the match condition (required)
* @param {Object} options for the geoSearch, some (near, maxDistance) are required
* @param {Object} [options.lean] if truthy, mongoose will return the document as a plain JavaScript object rather than a mongoose document. See [`Query.lean()`](http://mongoosejs.com/docs/api.html#query_Query-lean).
* @param {Function} [callback] optional callback
* @return {Promise}
* @see http://docs.mongodb.org/manual/reference/command/geoSearch/
* @see http://docs.mongodb.org/manual/core/geohaystack/
* @api public
*/
Model.geoSearch = function(conditions, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
if (callback) {
callback = this.$wrapCallback(callback);
}
return utils.promiseOrCallback(callback, cb => {
let error;
if (conditions === undefined || !utils.isObject(conditions)) {
error = new Error('Must pass conditions to geoSearch');
} else if (!options.near) {
error = new Error('Must specify the near option in geoSearch');
} else if (!Array.isArray(options.near)) {
error = new Error('near option must be an array [x, y]');
}
if (error) {
return cb(error);
}
// send the conditions in the options object
options.search = conditions;
this.collection.geoHaystackSearch(options.near[0], options.near[1], options, (err, res) => {
if (err) {
return cb(err);
}
let count = res.results.length;
if (options.lean || count === 0) {
return cb(null, res.results);
}
      let errSeen = false;
      function init(err) {
        if (err && !errSeen) {
          errSeen = true;
          return cb(err);
        }
        if (!--count && !errSeen) {
          cb(null, res.results);
        }
      }
for (let i = 0; i < res.results.length; ++i) {
const temp = res.results[i];
res.results[i] = new this();
res.results[i].init(temp, {}, init);
}
});
}, this.events);
};
/**
* Populates document references.
*
* ####Available top-level options:
*
* - path: space delimited path(s) to populate
* - select: optional fields to select
* - match: optional query conditions to match
* - model: optional name of the model to use for population
* - options: optional query options like sort, limit, etc
 *  - justOne: optional boolean, if true Mongoose will always set `path` to a single document (or null); if false, always to an array. Inferred from schema by default.
*
* ####Examples:
*
* // populates a single object
* User.findById(id, function (err, user) {
* var opts = [
* { path: 'company', match: { x: 1 }, select: 'name' }
* , { path: 'notes', options: { limit: 10 }, model: 'override' }
* ]
*
* User.populate(user, opts, function (err, user) {
* console.log(user);
* });
* });
*
* // populates an array of objects
* User.find(match, function (err, users) {
* var opts = [{ path: 'company', match: { x: 1 }, select: 'name' }]
*
* var promise = User.populate(users, opts);
* promise.then(console.log).end();
* })
*
* // imagine a Weapon model exists with two saved documents:
* // { _id: 389, name: 'whip' }
* // { _id: 8921, name: 'boomerang' }
* // and this schema:
* // new Schema({
* // name: String,
* // weapon: { type: ObjectId, ref: 'Weapon' }
* // });
*
* var user = { name: 'Indiana Jones', weapon: 389 }
* Weapon.populate(user, { path: 'weapon', model: 'Weapon' }, function (err, user) {
* console.log(user.weapon.name) // whip
* })
*
* // populate many plain objects
* var users = [{ name: 'Indiana Jones', weapon: 389 }]
* users.push({ name: 'Batman', weapon: 8921 })
* Weapon.populate(users, { path: 'weapon' }, function (err, users) {
* users.forEach(function (user) {
 *       console.log('%s uses a %s', user.name, user.weapon.name)
* // Indiana Jones uses a whip
* // Batman uses a boomerang
* });
* });
* // Note that we didn't need to specify the Weapon model because
* // it is in the schema's ref
*
* @param {Document|Array} docs Either a single document or array of documents to populate.
* @param {Object} options A hash of key/val (path, options) used for population.
* @param {boolean} [options.retainNullValues=false] by default, Mongoose removes null and undefined values from populated arrays. Use this option to make `populate()` retain `null` and `undefined` array entries.
* @param {boolean} [options.getters=false] if true, Mongoose will call any getters defined on the `localField`. By default, Mongoose gets the raw value of `localField`. For example, you would need to set this option to `true` if you wanted to [add a `lowercase` getter to your `localField`](/docs/schematypes.html#schematype-options).
* @param {boolean} [options.clone=false] When you do `BlogPost.find().populate('author')`, blog posts with the same author will share 1 copy of an `author` doc. Enable this option to make Mongoose clone populated docs before assigning them.
* @param {Function} [callback(err,doc)] Optional callback, executed upon completion. Receives `err` and the `doc(s)`.
* @return {Promise}
* @api public
*/
Model.populate = function(docs, paths, callback) {
const _this = this;
if (callback) {
callback = this.$wrapCallback(callback);
}
// normalized paths
paths = utils.populate(paths);
// data that should persist across subPopulate calls
const cache = {};
return utils.promiseOrCallback(callback, cb => {
_populate(_this, docs, paths, cache, cb);
}, this.events);
};
/*!
* Populate helper
*
* @param {Model} model the model to use
* @param {Document|Array} docs Either a single document or array of documents to populate.
* @param {Object} paths
* @param {Function} [cb(err,doc)] Optional callback, executed upon completion. Receives `err` and the `doc(s)`.
* @return {Function}
* @api private
*/
function _populate(model, docs, paths, cache, callback) {
let pending = paths.length;
if (pending === 0) {
return callback(null, docs);
}
// each path has its own query options and must be executed separately
let i = pending;
let path;
while (i--) {
path = paths[i];
populate(model, docs, path, next);
}
function next(err) {
if (err) {
return callback(err, null);
}
if (--pending) {
return;
}
callback(null, docs);
}
}
/*!
* Populates `docs`
*/
const excludeIdReg = /\s?-_id\s?/;
const excludeIdRegGlobal = /\s?-_id\s?/g;
function populate(model, docs, options, callback) {
// normalize single / multiple docs passed
if (!Array.isArray(docs)) {
docs = [docs];
}
if (docs.length === 0 || docs.every(utils.isNullOrUndefined)) {
return callback();
}
const modelsMap = getModelsMapForPopulate(model, docs, options);
if (modelsMap instanceof Error) {
return immediate(function() {
callback(modelsMap);
});
}
const len = modelsMap.length;
let mod;
let match;
let select;
let vals = [];
function flatten(item) {
// no need to include undefined values in our query
return undefined !== item;
}
let _remaining = len;
let hasOne = false;
for (let i = 0; i < len; ++i) {
mod = modelsMap[i];
select = mod.options.select;
if (mod.options.match) {
match = utils.object.shallowCopy(mod.options.match);
} else if (get(mod, 'options.options.match')) {
match = utils.object.shallowCopy(mod.options.options.match);
delete mod.options.options.match;
} else {
match = {};
}
let ids = utils.array.flatten(mod.ids, flatten);
ids = utils.array.unique(ids);
if (ids.length === 0 || ids.every(utils.isNullOrUndefined)) {
--_remaining;
continue;
}
hasOne = true;
if (mod.foreignField.size === 1) {
const foreignField = Array.from(mod.foreignField)[0];
if (foreignField !== '_id' || !match['_id']) {
match[foreignField] = { $in: ids };
}
} else {
match.$or = [];
for (const foreignField of mod.foreignField) {
if (foreignField !== '_id' || !match['_id']) {
match.$or.push({ [foreignField]: { $in: ids } });
}
}
}
const assignmentOpts = {};
assignmentOpts.sort = get(mod, 'options.options.sort', void 0);
assignmentOpts.excludeId = excludeIdReg.test(select) || (select && select._id === 0);
if (assignmentOpts.excludeId) {
// override the exclusion from the query so we can use the _id
// for document matching during assignment. we'll delete the
// _id back off before returning the result.
if (typeof select === 'string') {
select = select.replace(excludeIdRegGlobal, ' ');
} else {
// preserve original select conditions by copying
select = utils.object.shallowCopy(select);
delete select._id;
}
}
// If just setting count, skip everything else
if (mod.count) {
mod.model.countDocuments(match, function(err, count) {
if (err != null) {
return callback(err);
}
for (const doc of docs) {
try {
if (doc.$__ != null) {
doc.set(mod.options.path, count);
} else {
utils.setValue(mod.options.path, count, doc);
}
} catch (err) {
return callback(err);
}
}
callback(null);
});
continue;
}
if (mod.options.options && mod.options.options.limit) {
assignmentOpts.originalLimit = mod.options.options.limit;
mod.options.options.limit = mod.options.options.limit * ids.length;
}
const subPopulate = utils.clone(mod.options.populate);
const query = mod.model.find(match, select, mod.options.options);
// If we're doing virtual populate and projection is inclusive and foreign
// field is not selected, automatically select it because mongoose needs it.
// If projection is exclusive and client explicitly unselected the foreign
// field, that's the client's fault.
for (const foreignField of mod.foreignField) {
if (foreignField !== '_id' && query.selectedInclusively() &&
!isPathSelectedInclusive(query._fields, foreignField)) {
query.select(foreignField);
}
}
// If we need to sub-populate, call populate recursively
if (subPopulate) {
query.populate(subPopulate);
}
query.exec(next.bind(this, mod, assignmentOpts));
}
if (!hasOne) {
return callback();
}
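  // Called once per `modelsMap` entry when its query completes: restores the
  // original `limit`, accumulates the returned docs into `vals`, assigns them
  // back onto the source docs, and fires the final callback once every
  // outstanding query (`_remaining`) has finished.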
function next(options, assignmentOpts, err, valsFromDb) {
if (mod.options.options && mod.options.options.limit) {
mod.options.options.limit = assignmentOpts.originalLimit;
}
if (err) return callback(err, null);
vals = vals.concat(valsFromDb);
_assign(null, vals, options, assignmentOpts);
if (--_remaining === 0) {
callback();
}
}
function _assign(err, vals, mod, assignmentOpts) {
if (err) {
return callback(err, null);
}
const options = mod.options;
const isVirtual = mod.isVirtual;
const justOne = mod.justOne;
let _val;
const lean = options.options && options.options.lean;
const len = vals.length;
const rawOrder = {};
const rawDocs = {};
let key;
let val;
// Clone because `assignRawDocsToIdStructure` will mutate the array
const allIds = utils.clone(mod.allIds);
// optimization:
// record the document positions as returned by
// the query result.
for (let i = 0; i < len; i++) {
val = vals[i];
if (val == null) {
continue;
}
for (const foreignField of mod.foreignField) {
_val = utils.getValue(foreignField, val);
if (Array.isArray(_val)) {
_val = utils.array.flatten(_val);
const _valLength = _val.length;
for (let j = 0; j < _valLength; ++j) {
let __val = _val[j];
if (__val instanceof Document) {
__val = __val._id;
}
key = String(__val);
if (rawDocs[key]) {
if (Array.isArray(rawDocs[key])) {
rawDocs[key].push(val);
rawOrder[key].push(i);
} else {
rawDocs[key] = [rawDocs[key], val];
rawOrder[key] = [rawOrder[key], i];
}
} else {
if (isVirtual && !justOne) {
rawDocs[key] = [val];
rawOrder[key] = [i];
} else {
rawDocs[key] = val;
rawOrder[key] = i;
}
}
}
} else {
if (_val instanceof Document) {
_val = _val._id;
}
key = String(_val);
if (rawDocs[key]) {
if (Array.isArray(rawDocs[key])) {
rawDocs[key].push(val);
rawOrder[key].push(i);
} else {
rawDocs[key] = [rawDocs[key], val];
rawOrder[key] = [rawOrder[key], i];
}
} else {
rawDocs[key] = val;
rawOrder[key] = i;
}
}
// flag each as result of population
if (lean) {
leanPopulateMap.set(val, mod.model);
} else {
val.$__.wasPopulated = true;
}
}
}
assignVals({
originalModel: model,
// If virtual, make sure to not mutate original field
rawIds: mod.isVirtual ? allIds : mod.allIds,
allIds: allIds,
foreignField: mod.foreignField,
rawDocs: rawDocs,
rawOrder: rawOrder,
docs: mod.docs,
path: options.path,
options: assignmentOpts,
justOne: mod.justOne,
isVirtual: mod.isVirtual,
allOptions: mod,
lean: lean,
virtual: mod.virtual
});
}
}
/*!
* Assigns documents returned from a population query back
* to the original document path.
*/
function assignVals(o) {
// Options that aren't explicitly listed in `populateOptions`
const userOptions = get(o, 'allOptions.options.options');
// `o.options` contains options explicitly listed in `populateOptions`, like
// `match` and `limit`.
const populateOptions = Object.assign({}, o.options, userOptions, {
justOne: o.justOne
});
const originalIds = [].concat(o.rawIds);
// replace the original ids in our intermediate _ids structure
// with the documents found by query
assignRawDocsToIdStructure(o.rawIds, o.rawDocs, o.rawOrder, populateOptions);
// now update the original documents being populated using the
// result structure that contains real documents.
const docs = o.docs;
const rawIds = o.rawIds;
const options = o.options;
function setValue(val) {
return valueFilter(val, options, populateOptions);
}
for (let i = 0; i < docs.length; ++i) {
const existingVal = utils.getValue(o.path, docs[i]);
if (existingVal == null && !getVirtual(o.originalModel.schema, o.path)) {
continue;
}
// If we're populating a map, the existing value will be an object, so
// we need to transform again
const originalSchema = o.originalModel.schema;
let isMap = isModel(docs[i]) ?
existingVal instanceof Map :
utils.isPOJO(existingVal);
// If we pass the first check, also make sure the local field's schematype
// is map (re: gh-6460)
isMap = isMap && get(originalSchema._getSchema(o.path), '$isSchemaMap');
if (!o.isVirtual && isMap) {
const _keys = existingVal instanceof Map ?
Array.from(existingVal.keys()) :
Object.keys(existingVal);
rawIds[i] = rawIds[i].reduce((cur, v, i) => {
// Avoid casting because that causes infinite recursion
cur.$init(_keys[i], v);
return cur;
}, new MongooseMap({}, docs[i]));
}
if (o.isVirtual && docs[i] instanceof Model) {
docs[i].populated(o.path, o.justOne ? originalIds[0] : originalIds, o.allOptions);
// If virtual populate and doc is already init-ed, need to walk through
// the actual doc to set rather than setting `_doc` directly
mpath.set(o.path, rawIds[i], docs[i], setValue);
continue;
}
const parts = o.path.split('.');
let cur = docs[i];
for (let j = 0; j < parts.length - 1; ++j) {
if (cur[parts[j]] == null) {
cur[parts[j]] = {};
}
cur = cur[parts[j]];
}
if (docs[i].$__) {
docs[i].populated(o.path, o.allIds[i], o.allOptions);
}
// If lean, need to check that each individual virtual respects
// `justOne`, because you may have a populated virtual with `justOne`
// underneath an array. See gh-6867
utils.setValue(o.path, rawIds[i], docs[i], function(v) {
if (o.justOne === true && Array.isArray(v)) {
return setValue(v[0]);
} else if (o.justOne === false && !Array.isArray(v)) {
return setValue([v]);
}
return setValue(v);
}, false);
}
}
/*!
* Check if obj is a document
*/
function isModel(obj) {
return get(obj, '$__') != null;
}
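/*!
 * Builds one entry per target model that needs to be queried for `options.path`:
 * resolves the ref / refPath / virtual configuration for each doc, collects the
 * local-field ids to look up, and groups docs that share the same target model.
 * Returns an Error instead of a map when resolution fails.
 */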
function getModelsMapForPopulate(model, docs, options) {
let i;
let doc;
const len = docs.length;
const available = {};
const map = [];
const modelNameFromQuery = options.model && options.model.modelName || options.model;
let schema;
let refPath;
let Model;
let currentOptions;
let modelNames;
let modelName;
let discriminatorKey;
let modelForFindSchema;
const originalModel = options.model;
let isVirtual = false;
const modelSchema = model.schema;
for (i = 0; i < len; i++) {
doc = docs[i];
schema = getSchemaTypes(modelSchema, doc, options.path);
const isUnderneathDocArray = schema && schema.$isUnderneathDocArray;
if (isUnderneathDocArray && get(options, 'options.sort') != null) {
return new Error('Cannot populate with `sort` on path ' + options.path +
' because it is a subproperty of a document array');
}
modelNames = null;
let isRefPath = false;
if (Array.isArray(schema)) {
for (let j = 0; j < schema.length; ++j) {
let _modelNames;
try {
const res = _getModelNames(doc, schema[j]);
_modelNames = res.modelNames;
isRefPath = res.isRefPath;
} catch (error) {
return error;
}
if (!_modelNames) {
continue;
}
modelNames = modelNames || [];
for (let x = 0; x < _modelNames.length; ++x) {
if (modelNames.indexOf(_modelNames[x]) === -1) {
modelNames.push(_modelNames[x]);
}
}
}
} else {
try {
const res = _getModelNames(doc, schema);
modelNames = res.modelNames;
isRefPath = res.isRefPath;
} catch (error) {
return error;
}
if (!modelNames) {
continue;
}
}
const virtual = getVirtual(model.schema, options.path);
let localField;
let count = false;
if (virtual && virtual.options) {
const virtualPrefix = virtual.$nestedSchemaPath ?
virtual.$nestedSchemaPath + '.' : '';
if (typeof virtual.options.localField === 'function') {
localField = virtualPrefix + virtual.options.localField.call(doc, doc);
} else {
localField = virtualPrefix + virtual.options.localField;
}
count = virtual.options.count;
} else {
localField = options.path;
}
let foreignField = virtual && virtual.options ?
virtual.options.foreignField :
'_id';
// `justOne = null` means we don't know from the schema whether the end
// result should be an array or a single doc. This can result from
// populating a POJO using `Model.populate()`
let justOne = null;
if ('justOne' in options) {
justOne = options.justOne;
} else if (virtual && virtual.options && virtual.options.ref) {
let normalizedRef;
if (typeof virtual.options.ref === 'function') {
normalizedRef = virtual.options.ref.call(doc, doc);
} else {
normalizedRef = virtual.options.ref;
}
justOne = !!virtual.options.justOne;
isVirtual = true;
if (!modelNames) {
modelNames = [].concat(normalizedRef);
}
} else if (schema && !schema[schemaMixedSymbol]) {
// Skip Mixed types because we explicitly don't do casting on those.
justOne = !schema.$isMongooseArray;
}
if (!modelNames) {
continue;
}
if (virtual && (!localField || !foreignField)) {
return new Error('If you are populating a virtual, you must set the ' +
'localField and foreignField options');
}
options.isVirtual = isVirtual;
options.virtual = virtual;
if (typeof localField === 'function') {
localField = localField.call(doc, doc);
}
if (typeof foreignField === 'function') {
foreignField = foreignField.call(doc);
}
const localFieldPath = modelSchema.paths[localField];
const localFieldGetters = localFieldPath ? localFieldPath.getters : [];
let ret;
const _populateOptions = get(options, 'options', {});
const getters = 'getters' in _populateOptions ?
_populateOptions.getters :
options.isVirtual && get(virtual, 'options.getters', false);
if (localFieldGetters.length > 0 && getters) {
const hydratedDoc = (doc.$__ != null) ? doc : model.hydrate(doc);
ret = localFieldPath.applyGetters(doc[localField], hydratedDoc);
} else {
ret = convertTo_id(utils.getValue(localField, doc));
}
const id = String(utils.getValue(foreignField, doc));
options._docs[id] = Array.isArray(ret) ? ret.slice() : ret;
let k = modelNames.length;
while (k--) {
modelName = modelNames[k];
if (modelName == null) {
continue;
}
try {
Model = originalModel && originalModel[modelSymbol] ?
originalModel :
modelName[modelSymbol] ? modelName : model.db.model(modelName);
} catch (error) {
return error;
}
let ids = ret;
const flat = Array.isArray(ret) ? utils.array.flatten(ret) : [];
if (isRefPath && Array.isArray(ret) && flat.length === modelNames.length) {
ids = flat.filter((val, i) => modelNames[i] === modelName);
}
if (!available[modelName]) {
currentOptions = {
model: Model
};
if (isVirtual && virtual.options && virtual.options.options) {
currentOptions.options = utils.clone(virtual.options.options);
}
utils.merge(currentOptions, options);
if (schema && !discriminatorKey) {
currentOptions.model = Model;
}
options.model = Model;
available[modelName] = {
model: Model,
options: currentOptions,
docs: [doc],
ids: [ids],
allIds: [ret],
localField: new Set([localField]),
foreignField: new Set([foreignField]),
justOne: justOne,
isVirtual: isVirtual,
virtual: virtual,
count: count
};
map.push(available[modelName]);
} else {
available[modelName].localField.add(localField);
available[modelName].foreignField.add(foreignField);
available[modelName].docs.push(doc);
available[modelName].ids.push(ids);
available[modelName].allIds.push(ret);
}
}
}
function _getModelNames(doc, schema) {
let modelNames;
let discriminatorKey;
let isRefPath = false;
if (schema && schema.caster) {
schema = schema.caster;
}
if (schema && schema.$isSchemaMap) {
schema = schema.$__schemaType;
}
if (!schema && model.discriminators) {
discriminatorKey = model.schema.discriminatorMapping.key;
}
refPath = schema && schema.options && schema.options.refPath;
const normalizedRefPath = normalizeRefPath(refPath, doc, options.path);
if (modelNameFromQuery) {
modelNames = [modelNameFromQuery]; // query options
} else if (normalizedRefPath) {
if (options._queryProjection != null && isPathExcluded(options._queryProjection, normalizedRefPath)) {
throw new Error('refPath `' + normalizedRefPath +
'` must not be excluded in projection, got ' +
util.inspect(options._queryProjection));
}
modelNames = utils.getValue(normalizedRefPath, doc);
if (Array.isArray(modelNames)) {
modelNames = utils.array.flatten(modelNames);
}
isRefPath = true;
} else {
let modelForCurrentDoc = model;
let schemaForCurrentDoc;
if (!schema && discriminatorKey) {
modelForFindSchema = utils.getValue(discriminatorKey, doc);
if (modelForFindSchema) {
try {
modelForCurrentDoc = model.db.model(modelForFindSchema);
} catch (error) {
return error;
}
schemaForCurrentDoc = modelForCurrentDoc.schema._getSchema(options.path);
if (schemaForCurrentDoc && schemaForCurrentDoc.caster) {
schemaForCurrentDoc = schemaForCurrentDoc.caster;
}
}
} else {
schemaForCurrentDoc = schema;
}
const virtual = getVirtual(modelForCurrentDoc.schema, options.path);
let ref;
if ((ref = get(schemaForCurrentDoc, 'options.ref')) != null) {
modelNames = [ref];
} else if ((ref = get(virtual, 'options.ref')) != null) {
if (typeof ref === 'function') {
ref = ref.call(doc, doc);
}
// When referencing nested arrays, the ref should be an Array
// of modelNames.
if (Array.isArray(ref)) {
modelNames = ref;
} else {
modelNames = [ref];
}
isVirtual = true;
} else {
// We may have a discriminator, in which case we don't want to
// populate using the base model by default
modelNames = discriminatorKey ? null : [model.modelName];
}
}
if (!modelNames) {
return { modelNames: modelNames, isRefPath: isRefPath };
}
if (!Array.isArray(modelNames)) {
modelNames = [modelNames];
}
return { modelNames: modelNames, isRefPath: isRefPath };
}
return map;
}
/*!
* Retrieve the _id of `val` if a Document or Array of Documents.
*
* @param {Array|Document|Any} val
* @return {Array|Document|Any}
*/
function convertTo_id(val) {
if (val instanceof Model) return val._id;
if (Array.isArray(val)) {
for (let i = 0; i < val.length; ++i) {
if (val[i] instanceof Model) {
val[i] = val[i]._id;
}
}
if (val.isMongooseArray && val._schema) {
return val._schema.cast(val, val._parent);
}
return [].concat(val);
}
// `populate('map')` may be an object if populating on a doc that hasn't
// been hydrated yet
if (val != null && val.constructor.name === 'Object') {
const ret = [];
for (const key of Object.keys(val)) {
ret.push(val[key]);
}
return ret;
}
// If doc has already been hydrated, e.g. `doc.populate('map').execPopulate()`
// then `val` will already be a map
if (val instanceof Map) {
return Array.from(val.values());
}
return val;
}
/*!
* 1) Apply backwards compatible find/findOne behavior to sub documents
*
* find logic:
* a) filter out non-documents
* b) remove _id from sub docs when user specified
*
* findOne
* a) if no doc found, set to null
* b) remove _id from sub docs when user specified
*
* 2) Remove _ids when specified by users query.
*
* background:
* _ids are left in the query even when user excludes them so
* that population mapping can occur.
*/
function valueFilter(val, assignmentOpts, populateOptions) {
if (Array.isArray(val)) {
// find logic
const ret = [];
const numValues = val.length;
for (let i = 0; i < numValues; ++i) {
const subdoc = val[i];
if (!isPopulatedObject(subdoc) && (!populateOptions.retainNullValues || subdoc != null)) {
continue;
}
maybeRemoveId(subdoc, assignmentOpts);
ret.push(subdoc);
if (assignmentOpts.originalLimit &&
ret.length >= assignmentOpts.originalLimit) {
break;
}
}
// Since we don't want to have to create a new mongoosearray, make sure to
// modify the array in place
while (val.length > ret.length) {
Array.prototype.pop.apply(val, []);
}
for (let i = 0; i < ret.length; ++i) {
val[i] = ret[i];
}
return val;
}
// findOne
if (isPopulatedObject(val)) {
maybeRemoveId(val, assignmentOpts);
return val;
}
if (populateOptions.justOne === true) {
return (val == null ? val : null);
}
if (populateOptions.justOne === false) {
return [];
}
return val;
}
/*!
 * Remove _id from `subdoc` if the user excluded `_id` from the query projection
*/
function maybeRemoveId(subdoc, assignmentOpts) {
if (assignmentOpts.excludeId) {
if (typeof subdoc.setValue === 'function') {
delete subdoc._doc._id;
} else {
delete subdoc._id;
}
}
}
/*!
* Determine if `obj` is something we can set a populated path to. Can be a
* document, a lean document, or an array/map that contains docs.
*/
function isPopulatedObject(obj) {
if (obj == null) {
return false;
}
return Array.isArray(obj) ||
obj.$isMongooseMap ||
obj.$__ != null ||
leanPopulateMap.has(obj);
}
/*!
* Compiler utility.
*
* @param {String|Function} name model name or class extending Model
* @param {Schema} schema
* @param {String} collectionName
* @param {Connection} connection
* @param {Mongoose} base mongoose instance
*/
Model.compile = function compile(name, schema, collectionName, connection, base) {
const versioningEnabled = schema.options.versionKey !== false;
setParentPointers(schema);
if (versioningEnabled && !schema.paths[schema.options.versionKey]) {
// add versioning to top level documents only
const o = {};
o[schema.options.versionKey] = Number;
schema.add(o);
}
let model;
if (typeof name === 'function' && name.prototype instanceof Model) {
model = name;
name = model.name;
schema.loadClass(model, false);
model.prototype.$isMongooseModelPrototype = true;
} else {
// generate new class
model = function model(doc, fields, skipId) {
model.hooks.execPreSync('createModel', doc);
if (!(this instanceof model)) {
return new model(doc, fields, skipId);
}
Model.call(this, doc, fields, skipId);
};
}
model.hooks = schema.s.hooks.clone();
model.base = base;
model.modelName = name;
if (!(model.prototype instanceof Model)) {
model.__proto__ = Model;
model.prototype.__proto__ = Model.prototype;
}
model.model = Model.prototype.model;
model.db = model.prototype.db = connection;
model.discriminators = model.prototype.discriminators = undefined;
model[modelSymbol] = true;
model.events = new EventEmitter();
model.prototype.$__setSchema(schema);
const _userProvidedOptions = schema._userProvidedOptions || {};
// `bufferCommands` is true by default...
let bufferCommands = true;
// First, take the global option
if (connection.base.get('bufferCommands') != null) {
bufferCommands = connection.base.get('bufferCommands');
}
// Connection-specific overrides the global option
if (connection.config.bufferCommands != null) {
bufferCommands = connection.config.bufferCommands;
}
// And schema options override global and connection
if (_userProvidedOptions.bufferCommands != null) {
bufferCommands = _userProvidedOptions.bufferCommands;
}
const collectionOptions = {
bufferCommands: bufferCommands,
capped: schema.options.capped
};
model.prototype.collection = connection.collection(
collectionName,
collectionOptions
);
model.prototype[modelCollectionSymbol] = model.prototype.collection;
// apply methods and statics
applyMethods(model, schema);
applyStatics(model, schema);
applyHooks(model, schema);
model.schema = model.prototype.schema;
model.collection = model.prototype.collection;
// Create custom query constructor
model.Query = function() {
Query.apply(this, arguments);
};
model.Query.prototype = Object.create(Query.prototype);
model.Query.base = Query.base;
applyQueryMiddleware(model.Query, model);
applyQueryMethods(model, schema.query);
const kareemOptions = {
useErrorHandlers: true,
numCallbackParams: 1
};
model.$__insertMany = model.hooks.createWrapper('insertMany',
model.$__insertMany, model, kareemOptions);
return model;
};
/*!
* Register custom query methods for this model
*
* @param {Model} model
* @param {Schema} schema
*/
function applyQueryMethods(model, methods) {
for (const i in methods) {
model.Query.prototype[i] = methods[i];
}
}
/*!
* Subclass this model with `conn`, `schema`, and `collection` settings.
*
* @param {Connection} conn
* @param {Schema} [schema]
* @param {String} [collection]
* @return {Model}
*/
Model.__subclass = function subclass(conn, schema, collection) {
// subclass model using this connection and collection name
const _this = this;
const Model = function Model(doc, fields, skipId) {
if (!(this instanceof Model)) {
return new Model(doc, fields, skipId);
}
_this.call(this, doc, fields, skipId);
};
Model.__proto__ = _this;
Model.prototype.__proto__ = _this.prototype;
Model.db = Model.prototype.db = conn;
const s = schema && typeof schema !== 'string'
? schema
: _this.prototype.schema;
const options = s.options || {};
const _userProvidedOptions = s._userProvidedOptions || {};
if (!collection) {
collection = _this.prototype.schema.get('collection') ||
utils.toCollectionName(_this.modelName, this.base.pluralize());
}
let bufferCommands = true;
if (s) {
if (conn.config.bufferCommands != null) {
bufferCommands = conn.config.bufferCommands;
}
if (_userProvidedOptions.bufferCommands != null) {
bufferCommands = _userProvidedOptions.bufferCommands;
}
}
const collectionOptions = {
bufferCommands: bufferCommands,
capped: s && options.capped
};
Model.prototype.collection = conn.collection(collection, collectionOptions);
Model.prototype[modelCollectionSymbol] = Model.prototype.collection;
Model.collection = Model.prototype.collection;
// Errors handled internally, so ignore
Model.init(() => {});
return Model;
};
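/*!
 * Wraps a user-supplied callback so that an exception thrown synchronously
 * inside it is re-emitted as an 'error' event on the model instead of
 * bubbling out of the calling operation.
 */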
Model.$wrapCallback = function(callback) {
if (callback == null) {
return callback;
}
if (typeof callback !== 'function') {
throw new Error('Callback must be a function, got ' + callback);
}
const _this = this;
return function() {
try {
callback.apply(null, arguments);
} catch (error) {
_this.emit('error', error);
}
};
};
/*!
* Module exports.
*/
module.exports = exports = Model;
| 1 | 13,989 | There's an awful lot of test failures here because `localFieldPathType.schema` may not contain a `getters` array. | Automattic-mongoose | js |
@@ -56,7 +56,13 @@ class User < ActiveRecord::Base
scope :search, -> (term) {
search_pattern = "%#{term}%"
- where("firstname LIKE ? OR surname LIKE ? OR email LIKE ?", search_pattern, search_pattern, search_pattern)
+ # MySQL does not support standard string concatenation and since concat_ws or concat functions do
+ # not exist for sqlite, we have to come up with this conditional
+ if ActiveRecord::Base.connection.adapter_name == "Mysql2"
+ where("concat_ws(' ', firstname, surname) LIKE ? OR email LIKE ?", search_pattern, search_pattern)
+ else
+ where("firstname || ' ' || surname LIKE ? OR email LIKE ?", search_pattern, search_pattern)
+ end
}
# EVALUATE CLASS AND INSTANCE METHODS BELOW | 1 | class User < ActiveRecord::Base
include ConditionalUserMailer
##
# Devise
# Include default devise modules. Others available are:
# :token_authenticatable, :confirmable,
# :lockable, :timeoutable and :omniauthable
devise :invitable, :database_authenticatable, :registerable, :recoverable,
:rememberable, :trackable, :validatable, :omniauthable,
:omniauth_providers => [:shibboleth, :orcid]
##
# User Notification Preferences
serialize :prefs, Hash
##
# Associations
has_and_belongs_to_many :perms, join_table: :users_perms
belongs_to :language
belongs_to :org
has_one :pref
has_many :answers
has_many :notes
has_many :exported_plans
has_many :roles, dependent: :destroy
has_many :plans, through: :roles do
def filter(query)
return self unless query.present?
t = self.arel_table
q = "%#{query}%"
conditions = t[:title].matches(q)
columns = %i(
grant_number identifier description principal_investigator data_contact
)
columns.each {|col| conditions = conditions.or(t[col].matches(q)) }
self.where(conditions)
end
end
has_many :user_identifiers
has_many :identifier_schemes, through: :user_identifiers
validates :email, email: true, allow_nil: true, uniqueness: {message: _("must be unique")}
##
# Scopes
default_scope { includes(:org, :perms) }
# Retrieves all of the org_admins for the specified org
scope :org_admins, -> (org_id) {
joins(:perms).where("users.org_id = ? AND perms.name IN (?)", org_id,
['grant_permissions', 'modify_templates', 'modify_guidance', 'change_org_details'])
}
scope :search, -> (term) {
search_pattern = "%#{term}%"
where("firstname LIKE ? OR surname LIKE ? OR email LIKE ?", search_pattern, search_pattern, search_pattern)
}
# EVALUATE CLASS AND INSTANCE METHODS BELOW
#
# What do they do? do they do it efficiently, and do we need them?
  # Determines the locale set for the user or the organisation they belong to
# @return String or nil
def get_locale
if !self.language.nil?
return self.language.abbreviation
elsif !self.org.nil?
return self.org.get_locale
else
return nil
end
end
##
# gives either the name of the user, or the email if name unspecified
#
  # @param use_email [Boolean] defaults to true; when true (or when firstname and surname are blank) the email is returned instead of the name
# @return [String] the email or the firstname and surname of the user
def name(use_email = true)
if (firstname.blank? && surname.blank?) || use_email then
return email
else
name = "#{firstname} #{surname}"
return name.strip
end
end
##
# returns all active plans for a user
#
# @return [Plans]
def active_plans
self.plans.includes(:template).where("roles.active": true).where(Role.not_reviewer_condition)
end
##
# Returns the user's identifier for the specified scheme name
#
# @param the identifier scheme name (e.g. ORCID)
# @return [UserIdentifier] the user's identifier for that scheme
def identifier_for(scheme)
user_identifiers.where(identifier_scheme: scheme).first
end
# TODO: Check the logic here. Its deleting the permissions if the user does not have permission
# to change orgs and either the incoming or existing org is nil.
# We should also NOT be auto-saving here!!!
##
# sets a new organisation id for the user
# if the user has any perms such as org_admin or admin, those are removed
# if the user had an api_token, that is removed
#
# @param new_organisation_id [Integer] the id for an organisation
  # @return [String] the empty string as a casualty of setting api_token
def org_id=(new_org_id)
unless self.can_change_org? || new_org_id.nil? || self.org.nil? || (new_org_id.to_s == self.org.id.to_s)
# rip all permissions from the user
self.perms.delete_all
end
# set the user's new organisation
super(new_org_id)
self.save!
# rip api permissions from the user
self.remove_token!
end
##
# sets a new organisation for the user
#
# @param new_organisation [Organisation] the new organisation for the user
def organisation=(new_org)
org_id = new_org.id unless new_org.nil?
end
##
# checks if the user is a super admin
  # if the user has any privilege which requires them to see the super admin page
# then they are a super admin
#
# @return [Boolean] true if the user is an admin
def can_super_admin?
return self.can_add_orgs? || self.can_grant_api_to_orgs? || self.can_change_org?
end
##
# checks if the user is an organisation admin
  # if the user has any privilege which requires them to see the org-admin pages
# then they are an org admin
#
# @return [Boolean] true if the user is an organisation admin
def can_org_admin?
return self.can_grant_permissions? || self.can_modify_guidance? ||
self.can_modify_templates? || self.can_modify_org_details?
end
##
# checks if the user can add new organisations
#
# @return [Boolean] true if the user can add new organisations
def can_add_orgs?
perms.include? Perm.add_orgs
end
##
# checks if the user can change their organisation affiliations
#
# @return [Boolean] true if the user can change their organisation affiliations
def can_change_org?
perms.include? Perm.change_affiliation
end
##
# checks if the user can grant their permissions to others
#
# @return [Boolean] true if the user can grant their permissions to others
def can_grant_permissions?
perms.include? Perm.grant_permissions
end
##
# checks if the user can modify organisation templates
#
# @return [Boolean] true if the user can modify organisation templates
def can_modify_templates?
self.perms.include? Perm.modify_templates
end
##
# checks if the user can modify organisation guidance
#
  # @return [Boolean] true if the user can modify organisation guidance
def can_modify_guidance?
perms.include? Perm.modify_guidance
end
##
# checks if the user can use the api
#
# @return [Boolean] true if the user can use the api
def can_use_api?
perms.include? Perm.use_api
end
##
# checks if the user can modify their org's details
#
# @return [Boolean] true if the user can modify the org's details
def can_modify_org_details?
perms.include? Perm.change_org_details
end
##
# checks if the user can grant the api to organisations
#
# @return [Boolean] true if the user can grant api permissions to organisations
def can_grant_api_to_orgs?
perms.include? Perm.grant_api
end
##
# removes the api_token from the user
# modifies the user model
def remove_token!
unless api_token.blank?
self.api_token = ""
self.save!
end
end
##
# generates a new token for the user unless the user already has a token.
# modifies the user's model.
def keep_or_generate_token!
if api_token.nil? || api_token.empty?
self.api_token = loop do
random_token = SecureRandom.urlsafe_base64(nil, false)
break random_token unless User.exists?(api_token: random_token)
end
self.save!
deliver_if(recipients: self, key: 'users.admin_privileges') do |r|
UserMailer.api_token_granted_notification(r).deliver_now
end
end
end
##
# Load the user based on the scheme and id provided by the Omniauth call
# --------------------------------------------------------------
def self.from_omniauth(auth)
scheme = IdentifierScheme.find_by(name: auth.provider.downcase)
if scheme.nil?
throw Exception.new('Unknown OAuth provider: ' + auth.provider)
else
joins(:user_identifiers).where('user_identifiers.identifier': auth.uid,
'user_identifiers.identifier_scheme_id': scheme.id).first
end
end
##
# Return the user's preferences for a given base key
#
# @return [JSON] with symbols as keys
def get_preferences(key)
defaults = Pref.default_settings[key.to_sym] || Pref.default_settings[key.to_s]
if self.pref.present?
existing = self.pref.settings[key.to_s].deep_symbolize_keys
# Check for new preferences
defaults.keys.each do |grp|
defaults[grp].keys.each do |pref, v|
# If the group isn't present in the saved values add all of it's preferences
existing[grp] = defaults[grp] if existing[grp].nil?
# If the preference isn't present in the saved values add the default
existing[grp][pref] = defaults[grp][pref] if existing[grp][pref].nil?
end
end
existing
else
defaults
end
end
##
# Override devise_invitable email title
# --------------------------------------------------------------
def deliver_invitation(options = {})
super(options.merge(subject: _('A Data Management Plan in %{application_name} has been shared with you') % {application_name: Rails.configuration.branding[:application][:name]}))
end
##
# Case insensitive search over User model
# @param field [string] The name of the field being queried
# @param val [string] The string to search for, case insensitive. val is duck typed to check whether or not downcase method exist
# @return [ActiveRecord::Relation] The result of the search
def self.where_case_insensitive(field, val)
User.where("lower(#{field}) = ?", val.respond_to?(:downcase) ? val.downcase : val.to_s)
end
end
| 1 | 17,380 | MySQL allows `||` concatenation (e.g. `firstname||' '||surname`) if you enable it: `set sql_mode=PIPES_AS_CONCAT;`. I think this check is safer, though. | DMPRoadmap-roadmap | rb |
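For context on the `PIPES_AS_CONCAT` note above, here is a minimal, hypothetical sketch. It is written in Go with the `github.com/go-sql-driver/mysql` driver rather than the project's Ruby/ActiveRecord stack, and the DSN, table, and column names are placeholders. It shows that once a session enables that mode, the `||` form of the search query also runs on MySQL, although the adapter check in the patch remains the configuration-free option.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // MySQL driver for database/sql
)

func main() {
	// Placeholder DSN; adjust credentials and database name as needed.
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/roadmap")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Treat || as string concatenation for this session only.
	// (A real app would append to the existing sql_mode rather than replace it.)
	if _, err := db.Exec(`SET SESSION sql_mode = 'PIPES_AS_CONCAT'`); err != nil {
		log.Fatal(err)
	}

	pattern := "%smith%"
	rows, err := db.Query(
		`SELECT email FROM users
		 WHERE firstname || ' ' || surname LIKE ? OR email LIKE ?`,
		pattern, pattern)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var email string
		if err := rows.Scan(&email); err != nil {
			log.Fatal(err)
		}
		fmt.Println(email)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```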
@@ -5,11 +5,12 @@
package ens
import (
+ "bytes"
"errors"
"fmt"
"strings"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
goens "github.com/wealdtech/go-ens/v3"
| 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ens
import (
"errors"
"fmt"
"strings"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/ethclient"
goens "github.com/wealdtech/go-ens/v3"
"github.com/ethersphere/bee/pkg/resolver/client"
"github.com/ethersphere/bee/pkg/swarm"
)
const swarmContentHashPrefix = "/swarm/"
// Address is the swarm bzz address.
type Address = swarm.Address
// Make sure Client implements the resolver.Client interface.
var _ client.Interface = (*Client)(nil)
var (
// ErrFailedToConnect denotes that the resolver failed to connect to the
// provided endpoint.
ErrFailedToConnect = errors.New("failed to connect")
// ErrResolveFailed denotes that a name could not be resolved.
ErrResolveFailed = errors.New("resolve failed")
// ErrInvalidContentHash denotes that the value of the contenthash record is
// not valid.
ErrInvalidContentHash = errors.New("invalid swarm content hash")
// errNotImplemented denotes that the function has not been implemented.
errNotImplemented = errors.New("function not implemented")
)
// Client is a name resolution client that can connect to ENS via an
// Ethereum endpoint.
type Client struct {
endpoint string
ethCl *ethclient.Client
dialFn func(string) (*ethclient.Client, error)
resolveFn func(bind.ContractBackend, string) (string, error)
}
// Option is a function that applies an option to a Client.
type Option func(*Client)
// NewClient will return a new Client.
func NewClient(endpoint string, opts ...Option) (client.Interface, error) {
c := &Client{
endpoint: endpoint,
dialFn: ethclient.Dial,
resolveFn: wrapResolve,
}
// Apply all options to the Client.
for _, o := range opts {
o(c)
}
// Connect to the name resolution service.
if c.dialFn == nil {
return nil, fmt.Errorf("dialFn: %w", errNotImplemented)
}
ethCl, err := c.dialFn(c.endpoint)
if err != nil {
return nil, fmt.Errorf("%v: %w", err, ErrFailedToConnect)
}
c.ethCl = ethCl
return c, nil
}
// IsConnected returns true if there is an active RPC connection with an
// Ethereum node at the configured endpoint.
func (c *Client) IsConnected() bool {
return c.ethCl != nil
}
// Endpoint returns the endpoint the client was connected to.
func (c *Client) Endpoint() string {
return c.endpoint
}
// Resolve implements the resolver.Client interface.
func (c *Client) Resolve(name string) (Address, error) {
if c.resolveFn == nil {
return swarm.ZeroAddress, fmt.Errorf("resolveFn: %w", errNotImplemented)
}
hash, err := c.resolveFn(c.ethCl, name)
if err != nil {
return swarm.ZeroAddress, fmt.Errorf("%v: %w", err, ErrResolveFailed)
}
// Ensure that the content hash string is in a valid format, eg.
// "/swarm/<address>".
if !strings.HasPrefix(hash, swarmContentHashPrefix) {
return swarm.ZeroAddress, fmt.Errorf("contenthash %s: %w", hash, ErrInvalidContentHash)
}
// Trim the prefix and try to parse the result as a bzz address.
return swarm.ParseHexAddress(strings.TrimPrefix(hash, swarmContentHashPrefix))
}
// Close closes the RPC connection with the client, terminating all unfinished
// requests. If the connection is already closed, this call is a noop.
func (c *Client) Close() error {
if c.ethCl != nil {
c.ethCl.Close()
}
c.ethCl = nil
return nil
}
func wrapResolve(backend bind.ContractBackend, name string) (string, error) {
// Connect to the ENS resolver for the provided name.
ensR, err := goens.NewResolver(backend, name)
if err != nil {
return "", err
}
// Try and read out the content hash record.
ch, err := ensR.Contenthash()
if err != nil {
return "", err
}
return goens.ContenthashToString(ch)
}
| 1 | 13,126 | NOTE: this is the default ENS registry address. If the ENS suite is deployed from the current builds to any chain, it will always be found at this address. | ethersphere-bee | go |
@@ -26,6 +26,7 @@ Upcase::Application.routes.draw do
draw :teams
draw :trails
draw :users
+ draw :vanity
draw :videos
root to: "homes#show" | 1 | class ActionDispatch::Routing::Mapper
def draw(routes_name)
instance_eval(File.read(
Rails.root.join("config/routes/#{routes_name}.rb")
))
end
end
Upcase::Application.routes.draw do
use_doorkeeper
draw :redirects
draw :admin
draw :api
draw :clearance
draw :decks
draw :exercises
draw :pages
draw :plan
draw :podcasts
draw :repositories
draw :search
draw :shows
draw :stripe
draw :subscriber
draw :teams
draw :trails
draw :users
draw :videos
root to: "homes#show"
resource :annual_billing, only: :new
resource :credit_card, only: [:update]
resource :forum_sessions, only: :new
resources :payments, only: [:new]
resources :signups, only: [:create]
resource :subscription, only: [:new, :edit, :update]
resources :coupons, only: :show
resources :topics, only: :index, constraints: { format: "css" }
resources :onboardings, only: :create
get "pages/welcome", to: "high_voltage#show", as: "welcome"
resources(
:design_for_developers_resources,
path: "design-for-developers-resources",
only: [:index, :show]
)
resources(
:test_driven_rails_resources,
path: "test-driven-rails-resources",
only: [:index]
)
get "join" => "subscriptions#new", as: :join
get "practice" => "practice#show", as: :practice
get "explore" => "explore#show", as: :explore
get "sitemap.xml" => "sitemaps#show", as: :sitemap, format: "xml"
get ":id" => "topics#show", as: :topic
get ":topic_id/resources" => redirect("/%{topic_id}")
get "/auth/:provider/callback", to: "auth_callbacks#create"
end
| 1 | 15,361 | I'm curious why you are using `draw` in this route file? | thoughtbot-upcase | rb |
@@ -195,7 +195,18 @@ func Diff(ctx *context.Context) {
}
func RawDiff(ctx *context.Context) {
- panic("not implemented")
+ userName := ctx.Repo.Owner.Name
+ repoName := ctx.Repo.Repository.Name
+ commitID := ctx.Params(":sha")
+ diffType := ctx.Params(":ext")
+
+ diff, err := models.GetRawDiff(models.RepoPath(userName, repoName),
+ commitID, diffType)
+ if err != nil {
+ ctx.Handle(404, "GetRawDiff", err)
+ return
+ }
+ ctx.HandleText(200, diff)
}
func CompareDiff(ctx *context.Context) { | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"container/list"
"path"
"github.com/Unknwon/paginater"
"github.com/gogits/git-module"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/base"
"github.com/gogits/gogs/modules/context"
"github.com/gogits/gogs/modules/setting"
)
const (
COMMITS base.TplName = "repo/commits"
DIFF base.TplName = "repo/diff"
)
func RefCommits(ctx *context.Context) {
switch {
case len(ctx.Repo.TreeName) == 0:
Commits(ctx)
case ctx.Repo.TreeName == "search":
SearchCommits(ctx)
default:
FileHistory(ctx)
}
}
func RenderIssueLinks(oldCommits *list.List, repoLink string) *list.List {
newCommits := list.New()
for e := oldCommits.Front(); e != nil; e = e.Next() {
c := e.Value.(*git.Commit)
newCommits.PushBack(c)
}
return newCommits
}
func Commits(ctx *context.Context) {
ctx.Data["PageIsCommits"] = true
commitsCount, err := ctx.Repo.Commit.CommitsCount()
if err != nil {
ctx.Handle(500, "GetCommitsCount", err)
return
}
page := ctx.QueryInt("page")
if page <= 1 {
page = 1
}
ctx.Data["Page"] = paginater.New(int(commitsCount), git.CommitsRangeSize, page, 5)
// Both `git log branchName` and `git log commitId` work.
commits, err := ctx.Repo.Commit.CommitsByRange(page)
if err != nil {
ctx.Handle(500, "CommitsByRange", err)
return
}
commits = RenderIssueLinks(commits, ctx.Repo.RepoLink)
commits = models.ValidateCommitsWithEmails(commits)
ctx.Data["Commits"] = commits
ctx.Data["Username"] = ctx.Repo.Owner.Name
ctx.Data["Reponame"] = ctx.Repo.Repository.Name
ctx.Data["CommitCount"] = commitsCount
ctx.Data["Branch"] = ctx.Repo.BranchName
ctx.HTML(200, COMMITS)
}
func SearchCommits(ctx *context.Context) {
ctx.Data["PageIsCommits"] = true
keyword := ctx.Query("q")
if len(keyword) == 0 {
ctx.Redirect(ctx.Repo.RepoLink + "/commits/" + ctx.Repo.BranchName)
return
}
commits, err := ctx.Repo.Commit.SearchCommits(keyword)
if err != nil {
ctx.Handle(500, "SearchCommits", err)
return
}
commits = RenderIssueLinks(commits, ctx.Repo.RepoLink)
commits = models.ValidateCommitsWithEmails(commits)
ctx.Data["Commits"] = commits
ctx.Data["Keyword"] = keyword
ctx.Data["Username"] = ctx.Repo.Owner.Name
ctx.Data["Reponame"] = ctx.Repo.Repository.Name
ctx.Data["CommitCount"] = commits.Len()
ctx.Data["Branch"] = ctx.Repo.BranchName
ctx.HTML(200, COMMITS)
}
func FileHistory(ctx *context.Context) {
ctx.Data["IsRepoToolbarCommits"] = true
fileName := ctx.Repo.TreeName
if len(fileName) == 0 {
Commits(ctx)
return
}
branchName := ctx.Repo.BranchName
commitsCount, err := ctx.Repo.GitRepo.FileCommitsCount(branchName, fileName)
if err != nil {
ctx.Handle(500, "FileCommitsCount", err)
return
} else if commitsCount == 0 {
ctx.Handle(404, "FileCommitsCount", nil)
return
}
page := ctx.QueryInt("page")
if page <= 1 {
page = 1
}
ctx.Data["Page"] = paginater.New(int(commitsCount), git.CommitsRangeSize, page, 5)
commits, err := ctx.Repo.GitRepo.CommitsByFileAndRange(branchName, fileName, page)
if err != nil {
ctx.Handle(500, "CommitsByFileAndRange", err)
return
}
commits = RenderIssueLinks(commits, ctx.Repo.RepoLink)
commits = models.ValidateCommitsWithEmails(commits)
ctx.Data["Commits"] = commits
ctx.Data["Username"] = ctx.Repo.Owner.Name
ctx.Data["Reponame"] = ctx.Repo.Repository.Name
ctx.Data["FileName"] = fileName
ctx.Data["CommitCount"] = commitsCount
ctx.Data["Branch"] = branchName
ctx.HTML(200, COMMITS)
}
func Diff(ctx *context.Context) {
ctx.Data["PageIsDiff"] = true
userName := ctx.Repo.Owner.Name
repoName := ctx.Repo.Repository.Name
commitID := ctx.Params(":sha")
commit, err := ctx.Repo.GitRepo.GetCommit(commitID)
if err != nil {
ctx.Handle(500, "Repo.GitRepo.GetCommit", err)
return
}
diff, err := models.GetDiffCommit(models.RepoPath(userName, repoName),
commitID, setting.Git.MaxGitDiffLines,
setting.Git.MaxGitDiffLineCharacters, setting.Git.MaxGitDiffFiles)
if err != nil {
ctx.Handle(404, "GetDiffCommit", err)
return
}
parents := make([]string, commit.ParentCount())
for i := 0; i < commit.ParentCount(); i++ {
sha, err := commit.ParentID(i)
parents[i] = sha.String()
if err != nil {
ctx.Handle(404, "repo.Diff", err)
return
}
}
ctx.Data["CommitID"] = commitID
ctx.Data["IsSplitStyle"] = ctx.Query("style") == "split"
ctx.Data["Username"] = userName
ctx.Data["Reponame"] = repoName
ctx.Data["IsImageFile"] = commit.IsImageFile
ctx.Data["Title"] = commit.Summary() + " · " + base.ShortSha(commitID)
ctx.Data["Commit"] = commit
ctx.Data["Author"] = models.ValidateCommitWithEmail(commit)
ctx.Data["Diff"] = diff
ctx.Data["Parents"] = parents
ctx.Data["DiffNotAvailable"] = diff.NumFiles() == 0
ctx.Data["SourcePath"] = setting.AppSubUrl + "/" + path.Join(userName, repoName, "src", commitID)
if commit.ParentCount() > 0 {
ctx.Data["BeforeSourcePath"] = setting.AppSubUrl + "/" + path.Join(userName, repoName, "src", parents[0])
}
ctx.Data["RawPath"] = setting.AppSubUrl + "/" + path.Join(userName, repoName, "raw", commitID)
ctx.Data["RequireHighlightJS"] = true
ctx.HTML(200, DIFF)
}
func RawDiff(ctx *context.Context) {
panic("not implemented")
}
func CompareDiff(ctx *context.Context) {
ctx.Data["IsRepoToolbarCommits"] = true
ctx.Data["IsDiffCompare"] = true
userName := ctx.Repo.Owner.Name
repoName := ctx.Repo.Repository.Name
beforeCommitID := ctx.Params(":before")
afterCommitID := ctx.Params(":after")
commit, err := ctx.Repo.GitRepo.GetCommit(afterCommitID)
if err != nil {
ctx.Handle(404, "GetCommit", err)
return
}
diff, err := models.GetDiffRange(models.RepoPath(userName, repoName), beforeCommitID,
afterCommitID, setting.Git.MaxGitDiffLines,
setting.Git.MaxGitDiffLineCharacters, setting.Git.MaxGitDiffFiles)
if err != nil {
ctx.Handle(404, "GetDiffRange", err)
return
}
commits, err := commit.CommitsBeforeUntil(beforeCommitID)
if err != nil {
ctx.Handle(500, "CommitsBeforeUntil", err)
return
}
commits = models.ValidateCommitsWithEmails(commits)
ctx.Data["IsSplitStyle"] = ctx.Query("style") == "split"
ctx.Data["CommitRepoLink"] = ctx.Repo.RepoLink
ctx.Data["Commits"] = commits
ctx.Data["CommitCount"] = commits.Len()
ctx.Data["BeforeCommitID"] = beforeCommitID
ctx.Data["AfterCommitID"] = afterCommitID
ctx.Data["Username"] = userName
ctx.Data["Reponame"] = repoName
ctx.Data["IsImageFile"] = commit.IsImageFile
ctx.Data["Title"] = "Comparing " + base.ShortSha(beforeCommitID) + "..." + base.ShortSha(afterCommitID) + " · " + userName + "/" + repoName
ctx.Data["Commit"] = commit
ctx.Data["Diff"] = diff
ctx.Data["DiffNotAvailable"] = diff.NumFiles() == 0
ctx.Data["SourcePath"] = setting.AppSubUrl + "/" + path.Join(userName, repoName, "src", afterCommitID)
ctx.Data["BeforeSourcePath"] = setting.AppSubUrl + "/" + path.Join(userName, repoName, "src", beforeCommitID)
ctx.Data["RawPath"] = setting.AppSubUrl + "/" + path.Join(userName, repoName, "raw", afterCommitID)
ctx.HTML(200, DIFF)
}
| 1 | 11,544 | Those variables are only used once; I don't think we need to create them at all, just pass the values to `GetRawDiff` directly. | gogs-gogs | go |
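To illustrate the suggestion above, here is a sketch (not the committed code) of `RawDiff` with the single-use variables inlined. The package name and imports mirror the file shown in this row, and `models.GetRawDiff`, `models.RepoPath`, and `ctx.HandleText` are taken from the patch itself.

```go
package repo

import (
	"github.com/gogits/gogs/models"
	"github.com/gogits/gogs/modules/context"
)

// RawDiff serves the raw diff of a commit; each request value is read once
// and passed straight into GetRawDiff instead of being bound to a local first.
func RawDiff(ctx *context.Context) {
	diff, err := models.GetRawDiff(
		models.RepoPath(ctx.Repo.Owner.Name, ctx.Repo.Repository.Name),
		ctx.Params(":sha"),
		ctx.Params(":ext"),
	)
	if err != nil {
		ctx.Handle(404, "GetRawDiff", err)
		return
	}
	ctx.HandleText(200, diff)
}
```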
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
-#if NET461 || NET452
+#if NET461
using System;
using System.Linq; | 1 | // <copyright file="AspNetWebFormsTests.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
#if NET461 || NET452
using System;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Threading.Tasks;
using Datadog.Trace.TestHelpers;
using Xunit;
using Xunit.Abstractions;
namespace Datadog.Trace.ClrProfiler.IntegrationTests
{
[Collection("IisTests")]
public class AspNetWebFormsTests : TestHelper, IClassFixture<IisFixture>
{
private readonly IisFixture _iisFixture;
// NOTE: Would pass this in addition to the name/output to the new constructor if we removed the Samples.WebForms copied project in favor of the demo repo source project...
// $"../dd-trace-demo/dotnet-coffeehouse/Datadog.Coffeehouse.WebForms",
public AspNetWebFormsTests(IisFixture iisFixture, ITestOutputHelper output)
: base("WebForms", @"test\test-applications\aspnet", output)
{
SetServiceVersion("1.0.0");
_iisFixture = iisFixture;
_iisFixture.ShutdownPath = "/account/login?shutdown=1";
_iisFixture.TryStartIis(this, IisAppType.AspNetIntegrated);
}
[SkippableTheory]
[Trait("Category", "EndToEnd")]
[Trait("RunOnWindows", "True")]
[Trait("LoadFromGAC", "True")]
[InlineData("/Account/Login", "GET /account/login", false)]
public async Task SubmitsTraces(
string path,
string expectedResourceName,
bool isError)
{
await AssertAspNetSpanOnly(
path,
_iisFixture.Agent,
_iisFixture.HttpPort,
HttpStatusCode.OK,
isError,
expectedErrorType: null,
expectedErrorMessage: null,
SpanTypes.Web,
expectedResourceName,
"1.0.0");
}
[Fact(Skip = "This test requires Elasticsearch to be running on the host, which is not currently enabled in CI.")]
[Trait("Category", "EndToEnd")]
[Trait("RunOnWindows", "True")]
[Trait("LoadFromGAC", "True")]
public async Task NestedAsyncElasticCallSubmitsTrace()
{
var testStart = DateTime.UtcNow;
using (var httpClient = new HttpClient())
{
// disable tracing for this HttpClient request
httpClient.DefaultRequestHeaders.Add(HttpHeaderNames.TracingEnabled, "false");
var response = await httpClient.GetAsync($"http://localhost:{_iisFixture.HttpPort}" + "/Database/Elasticsearch");
var content = await response.Content.ReadAsStringAsync();
Output.WriteLine($"[http] {response.StatusCode} {content}");
Assert.Equal(HttpStatusCode.OK, response.StatusCode);
}
var allSpans = _iisFixture.Agent.WaitForSpans(3, minDateTime: testStart)
.OrderBy(s => s.Start)
.ToList();
Assert.True(allSpans.Count > 0, "Expected there to be spans.");
var elasticSpans = allSpans
.Where(s => s.Type == "elasticsearch")
.ToList();
Assert.True(elasticSpans.Count > 0, "Expected elasticsearch spans.");
foreach (var span in elasticSpans)
{
Assert.Equal("elasticsearch.query", span.Name);
Assert.Equal("Development Web Site-elasticsearch", span.Service);
Assert.Equal("elasticsearch", span.Type);
}
}
}
}
#endif
| 1 | 23,414 | I like the idea of replacing all the `#if NET461` with `#if NETFRAMEWORK`... is that worth doing now? Means fewer changes if we go to 4.7.2 at some point | DataDog-dd-trace-dotnet | .cs |
@@ -21,6 +21,8 @@ import (
// Compatible with Okta offline access, a holdover from previous defaults.
var defaultScopes = []string{oidc.ScopeOpenID, oidc.ScopeOfflineAccess, "email"}
+const clutchProviderName = "clutch"
+
type OIDCProvider struct {
provider *oidc.Provider
verifier *oidc.IDTokenVerifier | 1 | package authn
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"github.com/coreos/go-oidc/v3/oidc"
"github.com/dgrijalva/jwt-go"
"github.com/google/uuid"
"golang.org/x/oauth2"
authnv1 "github.com/lyft/clutch/backend/api/config/service/authn/v1"
)
// Default scopes, used if no scopes are provided in the configuration.
// Compatible with Okta offline access, a holdover from previous defaults.
var defaultScopes = []string{oidc.ScopeOpenID, oidc.ScopeOfflineAccess, "email"}
type OIDCProvider struct {
provider *oidc.Provider
verifier *oidc.IDTokenVerifier
oauth2 *oauth2.Config
httpClient *http.Client
sessionSecret string
tokenStorage Storage
providerAlias string
claimsFromOIDCToken ClaimsFromOIDCTokenFunc
}
// Clutch's state token claims used during the exchange.
type stateClaims struct {
*jwt.StandardClaims
RedirectURL string `json:"redirect"`
}
// Intermediate claims object for the ID token. Based on what scopes were requested.
type idClaims struct {
Email string `json:"email"`
}
func WithClaimsFromOIDCTokenFunc(p *OIDCProvider, fn ClaimsFromOIDCTokenFunc) *OIDCProvider {
ret := *p
ret.claimsFromOIDCToken = fn
return &ret
}
func (p *OIDCProvider) GetAuthCodeURL(ctx context.Context, state string) (string, error) {
opts := []oauth2.AuthCodeOption{oauth2.AccessTypeOffline}
return p.oauth2.AuthCodeURL(state, opts...), nil
}
func (p *OIDCProvider) ValidateStateNonce(state string) (string, error) {
claims := &stateClaims{}
_, err := jwt.ParseWithClaims(state, claims, func(token *jwt.Token) (interface{}, error) {
return []byte(p.sessionSecret), nil
})
if err != nil {
return "", err
}
if err := claims.Valid(); err != nil {
return "", err
}
return claims.RedirectURL, nil
}
func (p *OIDCProvider) GetStateNonce(redirectURL string) (string, error) {
u, err := url.Parse(redirectURL)
if err != nil {
return "", err
}
if u.Scheme != "" || u.Host != "" {
return "", errors.New("only relative redirects are supported")
}
dest := u.Path
if !strings.HasPrefix(dest, "/") {
dest = fmt.Sprintf("/%s", dest)
}
claims := &stateClaims{
StandardClaims: &jwt.StandardClaims{
Subject: uuid.New().String(), // UUID serves as CSRF token.
ExpiresAt: time.Now().Add(time.Minute * 5).Unix(),
IssuedAt: time.Now().Unix(),
},
RedirectURL: dest,
}
return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(p.sessionSecret))
}
func (p *OIDCProvider) Exchange(ctx context.Context, code string) (*oauth2.Token, error) {
// Exchange.
ctx = oidc.ClientContext(ctx, p.httpClient)
// offline_access is used to request issuance of a refresh_token. Some providers may request it as a scope though.
// Also it may need to be configurable in the future depending on the requirements of other providers or users.
token, err := p.oauth2.Exchange(ctx, code, oauth2.AccessTypeOffline)
if err != nil {
return nil, err
}
rawIDToken, ok := token.Extra("id_token").(string)
if !ok {
return nil, errors.New("'id_token' was not present in oauth token")
}
// Verify. This is superfluous since the token was just issued but it can't hurt.
idToken, err := p.verifier.Verify(ctx, rawIDToken)
if err != nil {
return nil, err
}
// Issue token with claims.
claims, err := p.claimsFromOIDCToken(ctx, idToken)
if err != nil {
return nil, err
}
if p.tokenStorage != nil {
err := p.tokenStorage.Store(ctx, claims.Subject, p.providerAlias, token)
if err != nil {
return nil, err
}
}
// Sign and issue token.
accessToken, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(p.sessionSecret))
if err != nil {
return nil, err
}
t := &oauth2.Token{
AccessToken: accessToken,
Expiry: time.Unix(claims.ExpiresAt, 0),
RefreshToken: "", // TODO: implement refresh_token flow with stateful sessions.
TokenType: "Bearer",
}
return t, err
}
type ClaimsFromOIDCTokenFunc func(ctx context.Context, t *oidc.IDToken) (*Claims, error)
// Extract claims from an OIDC token and return Clutch's standard claims object. This could be configurable at a later
// date to support subjects with IDs other than email (e.g. GitHub ID).
func DefaultClaimsFromOIDCToken(ctx context.Context, t *oidc.IDToken) (*Claims, error) {
idc := &idClaims{}
if err := t.Claims(idc); err != nil {
return nil, err
}
if idc.Email == "" {
return nil, errors.New("claims did not deserialize with desired fields")
}
sc := oidcTokenToStandardClaims(t)
sc.Subject = idc.Email
return &Claims{
StandardClaims: sc,
Groups: []string{""},
}, nil
}
func (p *OIDCProvider) Verify(ctx context.Context, rawToken string) (*Claims, error) {
claims := &Claims{}
_, err := jwt.ParseWithClaims(rawToken, claims, func(token *jwt.Token) (interface{}, error) {
return []byte(p.sessionSecret), nil
})
if err != nil {
return nil, err
}
if err := claims.Valid(); err != nil {
return nil, err
}
return claims, nil
}
func NewOIDCProvider(ctx context.Context, config *authnv1.Config, tokenStorage Storage) (Provider, error) {
c := config.GetOidc()
// Allows injection of test client. If client not present then add the default.
if v := ctx.Value(oauth2.HTTPClient); v == nil {
ctx = oidc.ClientContext(ctx, &http.Client{})
}
u, err := url.Parse(c.Issuer)
if err != nil {
return nil, err
}
alias := u.Hostname()
provider, err := oidc.NewProvider(ctx, c.Issuer)
if err != nil {
return nil, err
}
verifier := provider.Verifier(&oidc.Config{
ClientID: c.ClientId,
})
scopes := c.Scopes
if len(scopes) == 0 {
scopes = defaultScopes
}
oc := &oauth2.Config{
ClientID: c.ClientId,
ClientSecret: c.ClientSecret,
Endpoint: provider.Endpoint(),
RedirectURL: c.RedirectUrl,
Scopes: scopes,
}
// Verify the provider implements the same flow we do.
pClaims := &oidcProviderClaims{}
if err := provider.Claims(pClaims); err != nil {
return nil, err
}
if err := pClaims.Check("authorization_code"); err != nil {
return nil, err
}
p := &OIDCProvider{
providerAlias: alias,
provider: provider,
verifier: verifier,
oauth2: oc,
httpClient: ctx.Value(oauth2.HTTPClient).(*http.Client),
sessionSecret: config.SessionSecret,
claimsFromOIDCToken: DefaultClaimsFromOIDCToken,
tokenStorage: tokenStorage,
}
return p, nil
}
func oidcTokenToStandardClaims(t *oidc.IDToken) *jwt.StandardClaims {
return &jwt.StandardClaims{
ExpiresAt: t.Expiry.Unix(),
IssuedAt: t.IssuedAt.Unix(),
Issuer: t.Issuer,
Subject: t.Subject,
}
}
// Evaluates what flows the provider claims to support.
type oidcProviderClaims struct {
GrantTypesSupported []string `json:"grant_types_supported"`
}
func (pc *oidcProviderClaims) Check(grantType string) error {
for _, gt := range pc.GrantTypesSupported {
if gt == grantType {
return nil
}
}
return fmt.Errorf("grant type '%s' not supported by provider. supported: %v", grantType, pc.GrantTypesSupported)
}
| 1 | 10,546 | To fix the docs build failure, rename this const so it does not end with the `Name` suffix. | lyft-clutch | go |
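A minimal sketch of the rename being asked for; the replacement identifier is an assumption, since the review only says to drop the `Name` suffix.

```go
// Hypothetical rename; any identifier without the "Name" suffix would do.
const clutchProvider = "clutch"
```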
@@ -682,6 +682,7 @@ func checkClientTLSCertSubject(c *client, fn tlsMapAuthFn) bool {
return true
}
}
+ fallthrough
case hasURIs:
for _, u := range cert.URIs {
if match, ok := fn(u.String(), nil); ok { | 1 | // Copyright 2012-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"crypto/tls"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/base64"
"fmt"
"net"
"regexp"
"strings"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nats-server/v2/internal/ldap"
"github.com/nats-io/nkeys"
"golang.org/x/crypto/bcrypt"
)
// Authentication is an interface for implementing authentication
type Authentication interface {
// Check if a client is authorized to connect
Check(c ClientAuthentication) bool
}
// ClientAuthentication is an interface for client authentication
type ClientAuthentication interface {
// Get options associated with a client
GetOpts() *clientOpts
// If TLS is enabled, TLS ConnectionState, nil otherwise
GetTLSConnectionState() *tls.ConnectionState
// Optionally map a user after auth.
RegisterUser(*User)
// RemoteAddress expose the connection information of the client
RemoteAddress() net.Addr
}
// NkeyUser is for multiple nkey based users
type NkeyUser struct {
Nkey string `json:"user"`
Permissions *Permissions `json:"permissions,omitempty"`
Account *Account `json:"account,omitempty"`
SigningKey string `json:"signing_key,omitempty"`
}
// User is for multiple accounts/users.
type User struct {
Username string `json:"user"`
Password string `json:"password"`
Permissions *Permissions `json:"permissions,omitempty"`
Account *Account `json:"account,omitempty"`
}
// clone performs a deep copy of the User struct, returning a new clone with
// all values copied.
func (u *User) clone() *User {
if u == nil {
return nil
}
clone := &User{}
*clone = *u
clone.Permissions = u.Permissions.clone()
return clone
}
// clone performs a deep copy of the NkeyUser struct, returning a new clone with
// all values copied.
func (n *NkeyUser) clone() *NkeyUser {
if n == nil {
return nil
}
clone := &NkeyUser{}
*clone = *n
clone.Permissions = n.Permissions.clone()
return clone
}
// SubjectPermission is an individual allow and deny struct for publish
// and subscribe authorizations.
type SubjectPermission struct {
Allow []string `json:"allow,omitempty"`
Deny []string `json:"deny,omitempty"`
}
// ResponsePermission can be used to allow responses to any reply subject
// that is received on a valid subscription.
type ResponsePermission struct {
MaxMsgs int `json:"max"`
Expires time.Duration `json:"ttl"`
}
// Permissions are the allowed subjects on a per
// publish or subscribe basis.
type Permissions struct {
Publish *SubjectPermission `json:"publish"`
Subscribe *SubjectPermission `json:"subscribe"`
Response *ResponsePermission `json:"responses,omitempty"`
}
// RoutePermissions are similar to user permissions
// but describe what a server can import/export from and to
// another server.
type RoutePermissions struct {
Import *SubjectPermission `json:"import"`
Export *SubjectPermission `json:"export"`
}
// clone will clone an individual subject permission.
func (p *SubjectPermission) clone() *SubjectPermission {
if p == nil {
return nil
}
clone := &SubjectPermission{}
if p.Allow != nil {
clone.Allow = make([]string, len(p.Allow))
copy(clone.Allow, p.Allow)
}
if p.Deny != nil {
clone.Deny = make([]string, len(p.Deny))
copy(clone.Deny, p.Deny)
}
return clone
}
// clone performs a deep copy of the Permissions struct, returning a new clone
// with all values copied.
func (p *Permissions) clone() *Permissions {
if p == nil {
return nil
}
clone := &Permissions{}
if p.Publish != nil {
clone.Publish = p.Publish.clone()
}
if p.Subscribe != nil {
clone.Subscribe = p.Subscribe.clone()
}
if p.Response != nil {
clone.Response = &ResponsePermission{
MaxMsgs: p.Response.MaxMsgs,
Expires: p.Response.Expires,
}
}
return clone
}
// checkAuthforWarnings will look for insecure settings and log concerns.
// Lock is assumed held.
func (s *Server) checkAuthforWarnings() {
warn := false
if s.opts.Password != "" && !isBcrypt(s.opts.Password) {
warn = true
}
for _, u := range s.users {
// Skip warn if using TLS certs based auth
// unless a password has been left in the config.
if u.Password == "" && s.opts.TLSMap {
continue
}
if !isBcrypt(u.Password) {
warn = true
break
}
}
if warn {
// Warning about using plaintext passwords.
s.Warnf("Plaintext passwords detected, use nkeys or bcrypt")
}
}
// If Users or Nkeys options have definitions without an account defined,
// assign them to the default global account.
// Lock should be held.
func (s *Server) assignGlobalAccountToOrphanUsers(nkeys map[string]*NkeyUser, users map[string]*User) {
for _, u := range users {
if u.Account == nil {
u.Account = s.gacc
}
}
for _, u := range nkeys {
if u.Account == nil {
u.Account = s.gacc
}
}
}
// If the given permissions has a ResponsePermission
// set, ensure that defaults are set (if values are 0)
// and that a Publish permission is set, and Allow
// is disabled if not explicitly set.
func validateResponsePermissions(p *Permissions) {
if p == nil || p.Response == nil {
return
}
if p.Publish == nil {
p.Publish = &SubjectPermission{}
}
if p.Publish.Allow == nil {
// We turn off the blanket allow statement.
p.Publish.Allow = []string{}
}
// If there is a response permission, ensure
// that if value is 0, we set the default value.
if p.Response.MaxMsgs == 0 {
p.Response.MaxMsgs = DEFAULT_ALLOW_RESPONSE_MAX_MSGS
}
if p.Response.Expires == 0 {
p.Response.Expires = DEFAULT_ALLOW_RESPONSE_EXPIRATION
}
}
// configureAuthorization will do any setup needed for authorization.
// Lock is assumed held.
func (s *Server) configureAuthorization() {
opts := s.getOpts()
if opts == nil {
return
}
// Check for multiple users first
// This just checks and sets up the user map if we have multiple users.
if opts.CustomClientAuthentication != nil {
s.info.AuthRequired = true
} else if len(s.trustedKeys) > 0 {
s.info.AuthRequired = true
} else if opts.Nkeys != nil || opts.Users != nil {
s.nkeys, s.users = s.buildNkeysAndUsersFromOptions(opts.Nkeys, opts.Users)
s.info.AuthRequired = true
} else if opts.Username != "" || opts.Authorization != "" {
s.info.AuthRequired = true
} else {
s.users = nil
s.nkeys = nil
s.info.AuthRequired = false
}
// Do similar for websocket config
s.wsConfigAuth(&opts.Websocket)
}
// Takes the given slices of NkeyUser and User options and build
// corresponding maps used by the server. The users are cloned
// so that server does not reference options.
// The global account is assigned to users that don't have an
// existing account.
// Server lock is held on entry.
func (s *Server) buildNkeysAndUsersFromOptions(nko []*NkeyUser, uo []*User) (map[string]*NkeyUser, map[string]*User) {
var nkeys map[string]*NkeyUser
var users map[string]*User
if nko != nil {
nkeys = make(map[string]*NkeyUser, len(nko))
for _, u := range nko {
copy := u.clone()
if u.Account != nil {
if v, ok := s.accounts.Load(u.Account.Name); ok {
copy.Account = v.(*Account)
}
}
if copy.Permissions != nil {
validateResponsePermissions(copy.Permissions)
}
nkeys[u.Nkey] = copy
}
}
if uo != nil {
users = make(map[string]*User, len(uo))
for _, u := range uo {
copy := u.clone()
if u.Account != nil {
if v, ok := s.accounts.Load(u.Account.Name); ok {
copy.Account = v.(*Account)
}
}
if copy.Permissions != nil {
validateResponsePermissions(copy.Permissions)
}
users[u.Username] = copy
}
}
s.assignGlobalAccountToOrphanUsers(nkeys, users)
return nkeys, users
}
// checkAuthentication will check based on client type and
// return boolean indicating if client is authorized.
func (s *Server) checkAuthentication(c *client) bool {
switch c.kind {
case CLIENT:
return s.isClientAuthorized(c)
case ROUTER:
return s.isRouterAuthorized(c)
case GATEWAY:
return s.isGatewayAuthorized(c)
case LEAF:
return s.isLeafNodeAuthorized(c)
default:
return false
}
}
// isClientAuthorized will check the client against the proper authorization method and data.
// This could be nkey, token, or username/password based.
func (s *Server) isClientAuthorized(c *client) bool {
opts := s.getOpts()
// Check custom auth first, then jwts, then nkeys, then
// multiple users with TLS map if enabled, then token,
// then single user/pass.
if opts.CustomClientAuthentication != nil {
return opts.CustomClientAuthentication.Check(c)
}
return s.processClientOrLeafAuthentication(c, opts)
}
func (s *Server) processClientOrLeafAuthentication(c *client, opts *Options) bool {
var (
nkey *NkeyUser
juc *jwt.UserClaims
acc *Account
user *User
ok bool
err error
ao bool // auth override
)
s.mu.Lock()
authRequired := s.info.AuthRequired
// c.ws is immutable, but may need lock if we get race reports.
if !authRequired && c.ws != nil {
// If no auth required for regular clients, then check if
// we have an override for websocket clients.
authRequired = s.websocket.authOverride
}
if !authRequired {
// TODO(dlc) - If they send us credentials should we fail?
s.mu.Unlock()
return true
}
var (
username string
password string
token string
noAuthUser string
users map[string]*User
nkusers map[string]*NkeyUser
)
tlsMap := opts.TLSMap
if c.ws != nil {
wo := &opts.Websocket
// Always override TLSMap.
tlsMap = wo.TLSMap
// The rest depends on if there was any auth override in
// the websocket's config.
if s.websocket.authOverride {
noAuthUser = wo.NoAuthUser
username = wo.Username
password = wo.Password
token = wo.Token
users = s.websocket.users
nkusers = s.websocket.nkeys
ao = true
}
} else if c.kind == LEAF {
tlsMap = opts.LeafNode.TLSMap
}
if !ao {
noAuthUser = opts.NoAuthUser
username = opts.Username
password = opts.Password
token = opts.Authorization
users = s.users
nkusers = s.nkeys
}
// Check if we have trustedKeys defined in the server. If so we require a user jwt.
if s.trustedKeys != nil {
if c.opts.JWT == "" {
s.mu.Unlock()
c.Debugf("Authentication requires a user JWT")
return false
}
// So we have a valid user jwt here.
juc, err = jwt.DecodeUserClaims(c.opts.JWT)
if err != nil {
s.mu.Unlock()
c.Debugf("User JWT not valid: %v", err)
return false
}
vr := jwt.CreateValidationResults()
juc.Validate(vr)
if vr.IsBlocking(true) {
s.mu.Unlock()
c.Debugf("User JWT no longer valid: %+v", vr)
return false
}
}
// Check if we have nkeys or users for client.
hasNkeys := len(nkusers) > 0
hasUsers := len(users) > 0
if hasNkeys && c.opts.Nkey != "" {
nkey, ok = nkusers[c.opts.Nkey]
if !ok {
s.mu.Unlock()
return false
}
} else if hasUsers {
// Check if we are tls verify and are mapping users from the client_certificate.
if tlsMap {
authorized := checkClientTLSCertSubject(c, func(u string, certRDN *ldap.DN) (string, bool) {
// First do literal lookup using the resulting string representation
// of RDNSequence as implemented by the pkix package from Go.
if u != "" {
usr, ok := users[u]
if !ok {
return "", ok
}
user = usr
return usr.Username, ok
}
if certRDN == nil {
return "", false
}
// Look through the accounts for an RDN that is equal to the one
// presented by the certificate.
for _, usr := range users {
// TODO: Use this utility to make a full validation pass
// on start in case tlsmap feature is being used.
inputRDN, err := ldap.ParseDN(usr.Username)
if err != nil {
continue
}
if inputRDN.Equal(certRDN) {
user = usr
return usr.Username, true
}
}
return "", false
})
if !authorized {
s.mu.Unlock()
return false
}
if c.opts.Username != "" {
s.Warnf("User %q found in connect proto, but user required from cert", c.opts.Username)
}
// Already checked that the client didn't send a user in connect
// but we set it here to be able to identify it in the logs.
c.opts.Username = user.Username
} else {
if c.kind == CLIENT && c.opts.Username == "" && noAuthUser != "" {
if u, exists := users[noAuthUser]; exists {
c.opts.Username = u.Username
c.opts.Password = u.Password
}
}
if c.opts.Username != "" {
user, ok = users[c.opts.Username]
if !ok {
s.mu.Unlock()
return false
}
}
}
}
s.mu.Unlock()
// If we have a jwt and a userClaim, make sure we have the Account, etc associated.
// We need to look up the account. This will use an account resolver if one is present.
if juc != nil {
issuer := juc.Issuer
if juc.IssuerAccount != "" {
issuer = juc.IssuerAccount
}
if acc, err = s.LookupAccount(issuer); acc == nil {
c.Debugf("Account JWT lookup error: %v", err)
return false
}
if !s.isTrustedIssuer(acc.Issuer) {
c.Debugf("Account JWT not signed by trusted operator")
return false
}
if juc.IssuerAccount != "" && !acc.hasIssuer(juc.Issuer) {
c.Debugf("User JWT issuer is not known")
return false
}
if acc.IsExpired() {
c.Debugf("Account JWT has expired")
return false
}
// skip validation of nonce when presented with a bearer token
// FIXME: if BearerToken is only for WSS, need check for server with that port enabled
if !juc.BearerToken {
// Verify the signature against the nonce.
if c.opts.Sig == "" {
c.Debugf("Signature missing")
return false
}
sig, err := base64.RawURLEncoding.DecodeString(c.opts.Sig)
if err != nil {
// Allow fallback to normal base64.
sig, err = base64.StdEncoding.DecodeString(c.opts.Sig)
if err != nil {
c.Debugf("Signature not valid base64")
return false
}
}
pub, err := nkeys.FromPublicKey(juc.Subject)
if err != nil {
c.Debugf("User nkey not valid: %v", err)
return false
}
if err := pub.Verify(c.nonce, sig); err != nil {
c.Debugf("Signature not verified")
return false
}
}
if acc.checkUserRevoked(juc.Subject) {
c.Debugf("User authentication revoked")
return false
}
if !validateSrc(juc, c.host) {
c.Errorf("Bad src Ip %s", c.host)
return false
}
allowNow, validFor := validateTimes(juc)
if !allowNow {
c.Errorf("Outside connect times")
return false
}
nkey = buildInternalNkeyUser(juc, acc)
if err := c.RegisterNkeyUser(nkey); err != nil {
return false
}
// Hold onto the user's public key.
c.pubKey = juc.Subject
// Generate an event if we have a system account.
s.accountConnectEvent(c)
// Check if we need to set an auth timer if the user jwt expires.
c.setExpiration(juc.Claims(), validFor)
return true
}
if nkey != nil {
if c.opts.Sig == "" {
c.Debugf("Signature missing")
return false
}
sig, err := base64.RawURLEncoding.DecodeString(c.opts.Sig)
if err != nil {
// Allow fallback to normal base64.
sig, err = base64.StdEncoding.DecodeString(c.opts.Sig)
if err != nil {
c.Debugf("Signature not valid base64")
return false
}
}
pub, err := nkeys.FromPublicKey(c.opts.Nkey)
if err != nil {
c.Debugf("User nkey not valid: %v", err)
return false
}
if err := pub.Verify(c.nonce, sig); err != nil {
c.Debugf("Signature not verified")
return false
}
if err := c.RegisterNkeyUser(nkey); err != nil {
return false
}
return true
}
if user != nil {
ok = comparePasswords(user.Password, c.opts.Password)
// If we are authorized, register the user which will properly setup any permissions
// for pub/sub authorizations.
if ok {
c.RegisterUser(user)
// Generate an event if we have a system account and this is not the $G account.
s.accountConnectEvent(c)
}
return ok
}
if c.kind == CLIENT {
if token != "" {
return comparePasswords(token, c.opts.Token)
} else if username != "" {
if username != c.opts.Username {
return false
}
return comparePasswords(password, c.opts.Password)
}
} else if c.kind == LEAF {
// There is no required username/password to connect and
// there was no u/p in the CONNECT or none that matches the
		// known users. Register the leaf connection with the global account
// or the one specified in config (if provided).
return s.registerLeafWithAccount(c, opts.LeafNode.Account)
}
return false
}
func getTLSAuthDCs(rdns *pkix.RDNSequence) string {
dcOID := asn1.ObjectIdentifier{0, 9, 2342, 19200300, 100, 1, 25}
dcs := []string{}
for _, rdn := range *rdns {
if len(rdn) == 0 {
continue
}
for _, atv := range rdn {
value, ok := atv.Value.(string)
if !ok {
continue
}
if atv.Type.Equal(dcOID) {
dcs = append(dcs, "DC="+value)
}
}
}
return strings.Join(dcs, ",")
}
type tlsMapAuthFn func(string, *ldap.DN) (string, bool)
func checkClientTLSCertSubject(c *client, fn tlsMapAuthFn) bool {
tlsState := c.GetTLSConnectionState()
if tlsState == nil {
c.Debugf("User required in cert, no TLS connection state")
return false
}
if len(tlsState.PeerCertificates) == 0 {
c.Debugf("User required in cert, no peer certificates found")
return false
}
cert := tlsState.PeerCertificates[0]
if len(tlsState.PeerCertificates) > 1 {
c.Debugf("Multiple peer certificates found, selecting first")
}
hasSANs := len(cert.DNSNames) > 0
hasEmailAddresses := len(cert.EmailAddresses) > 0
hasSubject := len(cert.Subject.String()) > 0
hasURIs := len(cert.URIs) > 0
if !hasEmailAddresses && !hasSubject && !hasURIs {
c.Debugf("User required in cert, none found")
return false
}
switch {
case hasEmailAddresses:
for _, u := range cert.EmailAddresses {
if match, ok := fn(u, nil); ok {
c.Debugf("Using email found in cert for auth [%q]", match)
return true
}
}
fallthrough
case hasSANs:
for _, u := range cert.DNSNames {
if match, ok := fn(u, nil); ok {
c.Debugf("Using SAN found in cert for auth [%q]", match)
return true
}
}
case hasURIs:
for _, u := range cert.URIs {
if match, ok := fn(u.String(), nil); ok {
c.Debugf("Using URI found in cert for auth [%q]", match)
return true
}
}
}
// Use the string representation of the full RDN Sequence including
// the domain components in case there are any.
rdn := cert.Subject.ToRDNSequence().String()
// Match that follows original order from the subject takes precedence.
dn, err := ldap.FromCertSubject(cert.Subject)
if err == nil {
if match, ok := fn("", dn); ok {
c.Debugf("Using DistinguishedNameMatch for auth [%q]", match)
return true
}
c.Debugf("DistinguishedNameMatch could not be used for auth [%q]", rdn)
}
var rdns pkix.RDNSequence
if _, err := asn1.Unmarshal(cert.RawSubject, &rdns); err == nil {
// If found domain components then include roughly following
// the order from https://tools.ietf.org/html/rfc2253
//
// NOTE: The original sequence from string representation by ToRDNSequence does not follow
		// the correct ordering, so this addition of domainComponents would likely be deprecated in
		// another release in favor of using the correct order as parsed by the go-ldap library.
//
dcs := getTLSAuthDCs(&rdns)
if len(dcs) > 0 {
u := strings.Join([]string{rdn, dcs}, ",")
if match, ok := fn(u, nil); ok {
c.Debugf("Using RDNSequence for auth [%q]", match)
return true
}
c.Debugf("RDNSequence could not be used for auth [%q]", u)
}
}
// If no match, then use the string representation of the RDNSequence
// from the subject without the domainComponents.
if match, ok := fn(rdn, nil); ok {
c.Debugf("Using certificate subject for auth [%q]", match)
return true
}
c.Debugf("User in cert [%q], not found", rdn)
return false
}
// checkRouterAuth checks optional router authorization which can be nil or username/password.
func (s *Server) isRouterAuthorized(c *client) bool {
// Snapshot server options.
opts := s.getOpts()
// Check custom auth first, then TLS map if enabled
// then single user/pass.
if s.opts.CustomRouterAuthentication != nil {
return s.opts.CustomRouterAuthentication.Check(c)
}
if opts.Cluster.Username == "" {
return true
}
if opts.Cluster.TLSMap {
return checkClientTLSCertSubject(c, func(user string, _ *ldap.DN) (string, bool) {
return "", opts.Cluster.Username == user
})
}
if opts.Cluster.Username != c.opts.Username {
return false
}
if !comparePasswords(opts.Cluster.Password, c.opts.Password) {
return false
}
return true
}
// isGatewayAuthorized checks optional gateway authorization which can be nil or username/password.
func (s *Server) isGatewayAuthorized(c *client) bool {
// Snapshot server options.
opts := s.getOpts()
if opts.Gateway.Username == "" {
return true
}
// Check whether TLS map is enabled, otherwise use single user/pass.
if opts.Gateway.TLSMap {
return checkClientTLSCertSubject(c, func(user string, _ *ldap.DN) (string, bool) {
return "", opts.Gateway.Username == user
})
}
if opts.Gateway.Username != c.opts.Username {
return false
}
return comparePasswords(opts.Gateway.Password, c.opts.Password)
}
func (s *Server) registerLeafWithAccount(c *client, account string) bool {
var err error
acc := s.globalAccount()
if account != _EMPTY_ {
acc, err = s.lookupAccount(account)
if err != nil {
s.Errorf("authentication of user %q failed, unable to lookup account %q: %v",
c.opts.Username, account, err)
return false
}
}
if err = c.registerWithAccount(acc); err != nil {
return false
}
return true
}
// isLeafNodeAuthorized will check for auth for an inbound leaf node connection.
func (s *Server) isLeafNodeAuthorized(c *client) bool {
opts := s.getOpts()
isAuthorized := func(username, password, account string) bool {
if username != c.opts.Username {
return false
}
if !comparePasswords(password, c.opts.Password) {
return false
}
return s.registerLeafWithAccount(c, account)
}
// If leafnodes config has an authorization{} stanza, this takes precedence.
	// The user in CONNECT must match. We will bind to the account associated
// with that user (from the leafnode's authorization{} config).
if opts.LeafNode.Username != _EMPTY_ {
return isAuthorized(opts.LeafNode.Username, opts.LeafNode.Password, opts.LeafNode.Account)
} else if len(opts.LeafNode.Users) > 0 {
if opts.LeafNode.TLSMap {
var user *User
found := checkClientTLSCertSubject(c, func(u string, _ *ldap.DN) (string, bool) {
// This is expected to be a very small array.
for _, usr := range opts.LeafNode.Users {
if u == usr.Username {
user = usr
return u, true
}
}
return "", false
})
if !found {
return false
}
if c.opts.Username != "" {
s.Warnf("User %q found in connect proto, but user required from cert", c.opts.Username)
}
c.opts.Username = user.Username
// This will authorize since are using an existing user,
// but it will also register with proper account.
return isAuthorized(user.Username, user.Password, user.Account.GetName())
}
// This is expected to be a very small array.
for _, u := range opts.LeafNode.Users {
if u.Username == c.opts.Username {
var accName string
if u.Account != nil {
accName = u.Account.Name
}
return isAuthorized(u.Username, u.Password, accName)
}
}
return false
}
// We are here if we accept leafnode connections without any credentials.
// Still, if the CONNECT has some user info, we will bind to the
// user's account or to the specified default account (if provided)
// or to the global account.
return s.processClientOrLeafAuthentication(c, opts)
}
// Support for bcrypt stored passwords and tokens.
var validBcryptPrefix = regexp.MustCompile(`^\$2[a,b,x,y]{1}\$\d{2}\$.*`)
// isBcrypt checks whether the given password or token is bcrypted.
func isBcrypt(password string) bool {
if strings.HasPrefix(password, "$") {
return validBcryptPrefix.MatchString(password)
}
return false
}
func comparePasswords(serverPassword, clientPassword string) bool {
// Check to see if the server password is a bcrypt hash
if isBcrypt(serverPassword) {
if err := bcrypt.CompareHashAndPassword([]byte(serverPassword), []byte(clientPassword)); err != nil {
return false
}
} else if serverPassword != clientPassword {
return false
}
return true
}
func validateAuth(o *Options) error {
if o.NoAuthUser == "" {
return nil
}
if len(o.TrustedOperators) > 0 {
return fmt.Errorf("no_auth_user not compatible with Trusted Operator")
}
if o.Users == nil {
return fmt.Errorf(`no_auth_user: "%s" present, but users are not defined`, o.NoAuthUser)
}
for _, u := range o.Users {
if u.Username == o.NoAuthUser {
return nil
}
}
return fmt.Errorf(
`no_auth_user: "%s" not present as user in authorization block or account configuration`,
o.NoAuthUser)
}
| 1 | 11,446 | This also includes a bugfix: if a subjectAlternativeName was present in the cert, then URIs (e.g. SVID SPIFFE auth) would not have been attempted. | nats-io-nats-server | go |
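The reasoning behind the added `fallthrough` can be shown with a small standalone Go program. This is only an illustration of the switch semantics, not nats-server code: in a Go switch, only the first true case runs unless it explicitly falls through, so before the patch a certificate that had SANs would never reach the URI check.

```go
package main

import "fmt"

// firstMatch mimics the shape of checkClientTLSCertSubject's switch:
// each case tries one identity source and falls through to the next.
func firstMatch(hasEmails, hasSANs, hasURIs bool, match func(kind string) bool) bool {
	switch {
	case hasEmails:
		if match("email") {
			return true
		}
		fallthrough // no email matched, still try the SANs
	case hasSANs:
		if match("SAN") {
			return true
		}
		fallthrough // the line this patch adds: still try the URIs
	case hasURIs:
		if match("URI") {
			return true
		}
	}
	return false
}

func main() {
	// A cert that has SANs but whose only usable identity is a URI
	// (e.g. a SPIFFE ID): without the fallthrough this would return false.
	ok := firstMatch(false, true, true, func(kind string) bool {
		return kind == "URI"
	})
	fmt.Println("authorized:", ok) // authorized: true
}
```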
@@ -0,0 +1,15 @@
+package com.fsck.k9.service;
+
+
+import java.io.File;
+import java.io.IOException;
+
+import android.net.Uri;
+
+
+public interface FileProviderInterface {
+
+ File createProvidedFile() throws IOException;
+ Uri getUriForProvidedFile(File file, String mimeType) throws IOException;
+
+} | 1 | 1 | 13,900 | Same here. Feels like the wrong location. | k9mail-k-9 | java |
|
@@ -42,7 +42,9 @@ from qutebrowser.keyinput import modeman
from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils,
objreg, utils)
from qutebrowser.utils.usertypes import KeyMode
-from qutebrowser.misc import editor, guiprocess
+from qutebrowser.misc import editor, guiprocess, mhtml
+from qutebrowser.browser.downloads import (_path_suggestion as
+ download_path_suggestion)
class CommandDispatcher: | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Command dispatcher for TabbedBrowser."""
import os
import shlex
import posixpath
import functools
import xml.etree.ElementTree
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWidgets import QApplication, QTabBar
from PyQt5.QtCore import Qt, QUrl, QEvent
from PyQt5.QtGui import QClipboard, QKeyEvent
from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog
from PyQt5.QtWebKitWidgets import QWebPage
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.config import config, configexc
from qutebrowser.browser import webelem, inspector, urlmarks
from qutebrowser.keyinput import modeman
from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils,
objreg, utils)
from qutebrowser.utils.usertypes import KeyMode
from qutebrowser.misc import editor, guiprocess
class CommandDispatcher:
"""Command dispatcher for TabbedBrowser.
Contains all commands which are related to the current tab.
We can't simply add these commands to BrowserTab directly and use
currentWidget() for TabbedBrowser.cmd because at the time
cmdutils.register() decorators are run, currentWidget() will return None.
Attributes:
_editor: The ExternalEditor object.
_win_id: The window ID the CommandDispatcher is associated with.
_tabbed_browser: The TabbedBrowser used.
"""
def __init__(self, win_id, tabbed_browser):
self._editor = None
self._win_id = win_id
self._tabbed_browser = tabbed_browser
def __repr__(self):
return utils.get_repr(self)
def _new_tabbed_browser(self):
"""Get a tabbed-browser from a new window."""
from qutebrowser.mainwindow import mainwindow
new_window = mainwindow.MainWindow()
new_window.show()
return new_window.tabbed_browser
def _count(self):
"""Convenience method to get the widget count."""
return self._tabbed_browser.count()
def _set_current_index(self, idx):
"""Convenience method to set the current widget index."""
return self._tabbed_browser.setCurrentIndex(idx)
def _current_index(self):
"""Convenience method to get the current widget index."""
return self._tabbed_browser.currentIndex()
def _current_url(self):
"""Convenience method to get the current url."""
try:
return self._tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdexc.CommandError(msg)
def _current_title(self):
"""Convenience method to get the current title."""
return self._current_widget().title()
def _current_widget(self):
"""Get the currently active widget from a command."""
widget = self._tabbed_browser.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
return widget
def _open(self, url, tab=False, background=False, window=False):
"""Helper function to open a page.
Args:
url: The URL to open as QUrl.
tab: Whether to open in a new tab.
background: Whether to open in the background.
window: Whether to open in a new window
"""
urlutils.raise_cmdexc_if_invalid(url)
tabbed_browser = self._tabbed_browser
cmdutils.check_exclusive((tab, background, window), 'tbw')
if window:
tabbed_browser = self._new_tabbed_browser()
tabbed_browser.tabopen(url)
elif tab:
tabbed_browser.tabopen(url, background=False, explicit=True)
elif background:
tabbed_browser.tabopen(url, background=True, explicit=True)
else:
widget = self._current_widget()
widget.openurl(url)
def _cntwidget(self, count=None):
"""Return a widget based on a count/idx.
Args:
count: The tab index, or None.
Return:
The current widget if count is None.
The widget with the given tab ID if count is given.
None if no widget was found.
"""
if count is None:
return self._tabbed_browser.currentWidget()
elif 1 <= count <= self._count():
cmdutils.check_overflow(count + 1, 'int')
return self._tabbed_browser.widget(count - 1)
else:
return None
def _scroll_percent(self, perc=None, count=None, orientation=None):
"""Inner logic for scroll_percent_(x|y).
Args:
perc: How many percent to scroll, or None
count: How many percent to scroll, or None
orientation: Qt.Horizontal or Qt.Vertical
"""
if perc is None and count is None:
perc = 100
elif perc is None:
perc = count
if perc == 0:
self.scroll('top')
elif perc == 100:
self.scroll('bottom')
else:
perc = qtutils.check_overflow(perc, 'int', fatal=False)
frame = self._current_widget().page().currentFrame()
m = frame.scrollBarMaximum(orientation)
if m == 0:
return
frame.setScrollBarValue(orientation, int(m * perc / 100))
def _tab_move_absolute(self, idx):
"""Get an index for moving a tab absolutely.
Args:
idx: The index to get, as passed as count.
"""
if idx is None:
return 0
elif idx == 0:
return self._count() - 1
else:
return idx - 1
def _tab_move_relative(self, direction, delta):
"""Get an index for moving a tab relatively.
Args:
direction: + or - for relative moving, None for absolute.
delta: Delta to the current tab.
"""
if delta is None:
# We don't set delta to 1 in the function arguments because this
# gets called from tab_move which has delta set to None by default.
delta = 1
if direction == '-':
return self._current_index() - delta
elif direction == '+':
return self._current_index() + delta
def _tab_focus_last(self):
"""Select the tab which was last focused."""
try:
tab = objreg.get('last-focused-tab', scope='window',
window=self._win_id)
except KeyError:
raise cmdexc.CommandError("No last focused tab!")
idx = self._tabbed_browser.indexOf(tab)
if idx == -1:
raise cmdexc.CommandError("Last focused tab vanished!")
self._set_current_index(idx)
def _get_selection_override(self, left, right, opposite):
"""Helper function for tab_close to get the tab to select.
Args:
left: Force selecting the tab to the left of the current tab.
right: Force selecting the tab to the right of the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs->select-on-remove'.
Return:
QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change
should be made.
"""
cmdutils.check_exclusive((left, right, opposite), 'lro')
if left:
return QTabBar.SelectLeftTab
elif right:
return QTabBar.SelectRightTab
elif opposite:
conf_selection = config.get('tabs', 'select-on-remove')
if conf_selection == QTabBar.SelectLeftTab:
return QTabBar.SelectRightTab
elif conf_selection == QTabBar.SelectRightTab:
return QTabBar.SelectLeftTab
elif conf_selection == QTabBar.SelectPreviousTab:
raise cmdexc.CommandError(
"-o is not supported with 'tabs->select-on-remove' set to "
"'previous'!")
return None
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count')
def tab_close(self, left=False, right=False, opposite=False, count=None):
"""Close the current/[count]th tab.
Args:
left: Force selecting the tab to the left of the current tab.
right: Force selecting the tab to the right of the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs->select-on-remove'.
count: The tab index to close, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
tabbar = self._tabbed_browser.tabBar()
selection_override = self._get_selection_override(left, right,
opposite)
if selection_override is None:
self._tabbed_browser.close_tab(tab)
else:
old_selection_behavior = tabbar.selectionBehaviorOnRemove()
tabbar.setSelectionBehaviorOnRemove(selection_override)
self._tabbed_browser.close_tab(tab)
tabbar.setSelectionBehaviorOnRemove(old_selection_behavior)
@cmdutils.register(instance='command-dispatcher', name='open',
maxsplit=0, scope='window', count='count',
completion=[usertypes.Completion.url])
def openurl(self, url=None, bg=False, tab=False, window=False, count=None):
"""Open a URL in the current/[count]th tab.
Args:
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
count: The tab index to open the URL in, or None.
"""
if url is None:
if tab or bg or window:
url = config.get('general', 'default-page')
else:
raise cmdexc.CommandError("No URL given, but -t/-b/-w is not "
"set!")
else:
try:
url = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
if tab or bg or window:
self._open(url, tab, bg, window)
else:
curtab = self._cntwidget(count)
if curtab is None:
if count is None:
# We want to open a URL in the current tab, but none exists
# yet.
self._tabbed_browser.tabopen(url)
else:
# Explicit count with a tab that doesn't exist.
return
else:
curtab.openurl(url)
@cmdutils.register(instance='command-dispatcher', name='reload',
scope='window', count='count')
def reloadpage(self, force=False, count=None):
"""Reload the current/[count]th tab.
Args:
count: The tab index to reload, or None.
force: Bypass the page cache.
"""
tab = self._cntwidget(count)
if tab is not None:
if force:
tab.page().triggerAction(QWebPage.ReloadAndBypassCache)
else:
tab.reload()
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count')
def stop(self, count=None):
"""Stop loading in the current/[count]th tab.
Args:
count: The tab index to stop, or None.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.stop()
@cmdutils.register(instance='command-dispatcher', name='print',
scope='window', count='count')
def printpage(self, preview=False, count=None):
"""Print the current/[count]th tab.
Args:
preview: Show preview instead of printing.
count: The tab index to print, or None.
"""
if not qtutils.check_print_compat():
# WORKAROUND (remove this when we bump the requirements to 5.3.0)
raise cmdexc.CommandError(
"Printing on Qt < 5.3.0 on Windows is broken, please upgrade!")
tab = self._cntwidget(count)
if tab is not None:
if preview:
diag = QPrintPreviewDialog()
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.setWindowFlags(diag.windowFlags() |
Qt.WindowMaximizeButtonHint |
Qt.WindowMinimizeButtonHint)
diag.paintRequested.connect(tab.print)
diag.exec_()
else:
diag = QPrintDialog()
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.open(lambda: tab.print(diag.printer()))
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_clone(self, bg=False, window=False):
"""Duplicate the current tab.
Args:
bg: Open in a background tab.
window: Open in a new window.
Return:
The new QWebView.
"""
if bg and window:
raise cmdexc.CommandError("Only one of -b/-w can be given!")
curtab = self._current_widget()
cur_title = self._tabbed_browser.page_title(self._current_index())
# The new tab could be in a new tabbed_browser (e.g. because of
# tabs-are-windows being set)
if window:
new_tabbed_browser = self._new_tabbed_browser()
else:
new_tabbed_browser = self._tabbed_browser
newtab = new_tabbed_browser.tabopen(background=bg, explicit=True)
new_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=newtab.win_id)
idx = new_tabbed_browser.indexOf(newtab)
new_tabbed_browser.set_page_title(idx, cur_title)
if config.get('tabs', 'show-favicons'):
new_tabbed_browser.setTabIcon(idx, curtab.icon())
newtab.keep_icon = True
newtab.setZoomFactor(curtab.zoomFactor())
history = qtutils.serialize(curtab.history())
qtutils.deserialize(history, newtab.history())
return newtab
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_detach(self):
"""Detach the current tab to its own window."""
url = self._current_url()
self._open(url, window=True)
cur_widget = self._current_widget()
self._tabbed_browser.close_tab(cur_widget)
def _back_forward(self, tab, bg, window, count, forward):
"""Helper function for :back/:forward."""
if (not forward and not
self._current_widget().page().history().canGoBack()):
raise cmdexc.CommandError("At beginning of history.")
if (forward and not
self._current_widget().page().history().canGoForward()):
raise cmdexc.CommandError("At end of history.")
if tab or bg or window:
widget = self.tab_clone(bg, window)
else:
widget = self._current_widget()
for _ in range(count):
if forward:
widget.forward()
else:
widget.back()
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count')
def back(self, tab=False, bg=False, window=False, count=1):
"""Go back in the history of the current tab.
Args:
tab: Go back in a new tab.
bg: Go back in a background tab.
window: Go back in a new window.
count: How many pages to go back.
"""
self._back_forward(tab, bg, window, count, forward=False)
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count')
def forward(self, tab=False, bg=False, window=False, count=1):
"""Go forward in the history of the current tab.
Args:
tab: Go forward in a new tab.
bg: Go forward in a background tab.
window: Go forward in a new window.
count: How many pages to go forward.
"""
self._back_forward(tab, bg, window, count, forward=True)
def _navigate_incdec(self, url, incdec, tab, background, window):
"""Helper method for :navigate when `where' is increment/decrement.
Args:
url: The current url.
incdec: Either 'increment' or 'decrement'.
tab: Whether to open the link in a new tab.
background: Open the link in a new background tab.
window: Open the link in a new window.
"""
segments = config.get('general', 'url-incdec-segments')
if segments is None:
segments = set()
else:
segments = set(segments)
try:
new_url = urlutils.incdec_number(url, incdec, segments=segments)
except urlutils.IncDecError as error:
raise cmdexc.CommandError(error.msg)
self._open(new_url, tab, background, window)
def _navigate_up(self, url, tab, background, window):
"""Helper method for :navigate when `where' is up.
Args:
url: The current url.
tab: Whether to open the link in a new tab.
background: Open the link in a new background tab.
window: Open the link in a new window.
"""
path = url.path()
if not path or path == '/':
raise cmdexc.CommandError("Can't go up!")
new_path = posixpath.join(path, posixpath.pardir)
url.setPath(new_path)
self._open(url, tab, background, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def navigate(self, where: {'type': ('prev', 'next', 'up', 'increment',
'decrement')},
tab=False, bg=False, window=False):
"""Open typical prev/next links or navigate using the URL path.
This tries to automatically click on typical _Previous Page_ or
_Next Page_ links using some heuristics.
Alternatively it can navigate by changing the current URL.
Args:
where: What to open.
- `prev`: Open a _previous_ link.
- `next`: Open a _next_ link.
- `up`: Go up a level in the current URL.
- `increment`: Increment the last number in the URL.
- `decrement`: Decrement the last number in the URL.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
cmdutils.check_exclusive((tab, bg, window), 'tbw')
widget = self._current_widget()
frame = widget.page().currentFrame()
url = self._current_url()
if frame is None:
raise cmdexc.CommandError("No frame focused!")
hintmanager = objreg.get('hintmanager', scope='tab', tab='current')
if where == 'prev':
hintmanager.follow_prevnext(frame, url, prev=True, tab=tab,
background=bg, window=window)
elif where == 'next':
hintmanager.follow_prevnext(frame, url, prev=False, tab=tab,
background=bg, window=window)
elif where == 'up':
self._navigate_up(url, tab, bg, window)
elif where in ('decrement', 'increment'):
self._navigate_incdec(url, where, tab, bg, window)
else:
raise ValueError("Got called with invalid value {} for "
"`where'.".format(where))
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window', count='count')
def scroll_px(self, dx: {'type': float}, dy: {'type': float}, count=1):
"""Scroll the current tab by 'count * dx/dy' pixels.
Args:
dx: How much to scroll in x-direction.
dy: How much to scroll in y-direction.
count: multiplier
"""
dx *= count
dy *= count
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
self._current_widget().page().currentFrame().scroll(dx, dy)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window', count='count')
def scroll(self,
direction: {'type': (str, float)},
dy: {'type': float, 'hide': True}=None,
count=1):
"""Scroll the current tab in the given direction.
Args:
direction: In which direction to scroll
(up/down/left/right/top/bottom).
dy: Deprecated argument to support the old dx/dy form.
count: multiplier
"""
# pylint: disable=too-many-locals
try:
# Check for deprecated dx/dy form (like with scroll-px).
dx = float(direction)
dy = float(dy)
except (ValueError, TypeError):
# Invalid values will get handled later.
pass
else:
message.warning(self._win_id, ":scroll with dx/dy arguments is "
"deprecated - use :scroll-px instead!")
self.scroll_px(dx, dy, count=count)
return
fake_keys = {
'up': Qt.Key_Up,
'down': Qt.Key_Down,
'left': Qt.Key_Left,
'right': Qt.Key_Right,
'top': Qt.Key_Home,
'bottom': Qt.Key_End,
'page-up': Qt.Key_PageUp,
'page-down': Qt.Key_PageDown,
}
try:
key = fake_keys[direction]
except KeyError:
expected_values = ', '.join(sorted(fake_keys))
raise cmdexc.CommandError("Invalid value {!r} for direction - "
"expected one of: {}".format(
direction, expected_values))
widget = self._current_widget()
frame = widget.page().currentFrame()
press_evt = QKeyEvent(QEvent.KeyPress, key, Qt.NoModifier, 0, 0, 0)
release_evt = QKeyEvent(QEvent.KeyRelease, key, Qt.NoModifier, 0, 0, 0)
# Count doesn't make sense with top/bottom
if direction in ('top', 'bottom'):
count = 1
max_min = {
'up': [Qt.Vertical, frame.scrollBarMinimum],
'down': [Qt.Vertical, frame.scrollBarMaximum],
'left': [Qt.Horizontal, frame.scrollBarMinimum],
'right': [Qt.Horizontal, frame.scrollBarMaximum],
'page-up': [Qt.Vertical, frame.scrollBarMinimum],
'page-down': [Qt.Vertical, frame.scrollBarMaximum],
}
for _ in range(count):
# Abort scrolling if the minimum/maximum was reached.
try:
qt_dir, getter = max_min[direction]
except KeyError:
pass
else:
if frame.scrollBarValue(qt_dir) == getter(qt_dir):
return
widget.keyPressEvent(press_evt)
widget.keyReleaseEvent(release_evt)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window', count='count')
def scroll_perc(self, perc: {'type': float}=None,
horizontal: {'flag': 'x'}=False, count=None):
"""Scroll to a specific percentage of the page.
The percentage can be given either as argument or as count.
If no percentage is given, the page is scrolled to the end.
Args:
perc: Percentage to scroll.
horizontal: Scroll horizontally instead of vertically.
count: Percentage to scroll.
"""
self._scroll_percent(perc, count,
Qt.Horizontal if horizontal else Qt.Vertical)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window', count='count')
def scroll_page(self, x: {'type': float}, y: {'type': float}, *,
top_navigate: {'type': ('prev', 'decrement'),
'metavar': 'ACTION'}=None,
bottom_navigate: {'type': ('next', 'increment'),
'metavar': 'ACTION'}=None,
count=1):
"""Scroll the frame page-wise.
Args:
x: How many pages to scroll to the right.
y: How many pages to scroll down.
bottom_navigate: :navigate action (next, increment) to run when
scrolling down at the bottom of the page.
top_navigate: :navigate action (prev, decrement) to run when
scrolling up at the top of the page.
count: multiplier
"""
frame = self._current_widget().page().currentFrame()
if not frame.url().isValid():
# See https://github.com/The-Compiler/qutebrowser/issues/701
return
if (bottom_navigate is not None and
frame.scrollPosition().y() >=
frame.scrollBarMaximum(Qt.Vertical)):
self.navigate(bottom_navigate)
return
elif top_navigate is not None and frame.scrollPosition().y() == 0:
self.navigate(top_navigate)
return
mult_x = count * x
mult_y = count * y
if mult_y.is_integer():
if mult_y == 0:
pass
elif mult_y < 0:
self.scroll('page-up', count=-int(mult_y))
elif mult_y > 0:
self.scroll('page-down', count=int(mult_y))
mult_y = 0
if mult_x == 0 and mult_y == 0:
return
size = frame.geometry()
dx = mult_x * size.width()
dy = mult_y * size.height()
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
frame.scroll(dx, dy)
@cmdutils.register(instance='command-dispatcher', scope='window')
def yank(self, title=False, sel=False, domain=False):
"""Yank the current URL/title to the clipboard or primary selection.
Args:
sel: Use the primary selection instead of the clipboard.
title: Yank the title instead of the URL.
domain: Yank only the scheme, domain, and port number.
"""
clipboard = QApplication.clipboard()
if title:
s = self._tabbed_browser.page_title(self._current_index())
what = 'title'
elif domain:
port = self._current_url().port()
s = '{}://{}{}'.format(self._current_url().scheme(),
self._current_url().host(),
':' + str(port) if port > -1 else '')
what = 'domain'
else:
s = self._current_url().toString(
QUrl.FullyEncoded | QUrl.RemovePassword)
what = 'URL'
if sel and clipboard.supportsSelection():
mode = QClipboard.Selection
target = "primary selection"
else:
mode = QClipboard.Clipboard
target = "clipboard"
log.misc.debug("Yanking to {}: '{}'".format(target, s))
clipboard.setText(s, mode)
message.info(self._win_id, "Yanked {} to {}: {}".format(
what, target, s))
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count')
def zoom_in(self, count=1):
"""Increase the zoom level for the current tab.
Args:
count: How many steps to zoom in.
"""
tab = self._current_widget()
try:
perc = tab.zoom(count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info(self._win_id, "Zoom level: {}%".format(perc))
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count')
def zoom_out(self, count=1):
"""Decrease the zoom level for the current tab.
Args:
count: How many steps to zoom out.
"""
tab = self._current_widget()
try:
perc = tab.zoom(-count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info(self._win_id, "Zoom level: {}%".format(perc))
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count')
def zoom(self, zoom: {'type': int}=None, count=None):
"""Set the zoom level for the current tab.
The zoom can be given as argument or as [count]. If neither is
given, the zoom is set to the default zoom.
Args:
zoom: The zoom percentage to set.
count: The zoom percentage to set.
"""
try:
default = config.get('ui', 'default-zoom')
level = cmdutils.arg_or_count(zoom, count, default=default)
except ValueError as e:
raise cmdexc.CommandError(e)
tab = self._current_widget()
try:
tab.zoom_perc(level)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info(self._win_id, "Zoom level: {}%".format(level))
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_only(self, left=False, right=False):
"""Close all tabs except for the current one.
Args:
left: Keep tabs to the left of the current.
right: Keep tabs to the right of the current.
"""
cmdutils.check_exclusive((left, right), 'lr')
cur_idx = self._tabbed_browser.currentIndex()
assert cur_idx != -1
for i, tab in enumerate(self._tabbed_browser.widgets()):
if (i == cur_idx or (left and i < cur_idx) or
(right and i > cur_idx)):
continue
else:
self._tabbed_browser.close_tab(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def undo(self):
"""Re-open a closed tab (optionally skipping [count] closed tabs)."""
try:
self._tabbed_browser.undo()
except IndexError:
raise cmdexc.CommandError("Nothing to undo!")
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count')
def tab_prev(self, count=1):
"""Switch to the previous tab, or switch [count] tabs back.
Args:
count: How many tabs to switch back.
"""
newidx = self._current_index() - count
if newidx >= 0:
self._set_current_index(newidx)
elif config.get('tabs', 'wrap'):
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("First tab")
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count')
def tab_next(self, count=1):
"""Switch to the next tab, or switch [count] tabs forward.
Args:
count: How many tabs to switch forward.
"""
newidx = self._current_index() + count
if newidx < self._count():
self._set_current_index(newidx)
elif config.get('tabs', 'wrap'):
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("Last tab")
@cmdutils.register(instance='command-dispatcher', scope='window')
def paste(self, sel=False, tab=False, bg=False, window=False):
"""Open a page from the clipboard.
Args:
sel: Use the primary selection instead of the clipboard.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in new window.
"""
clipboard = QApplication.clipboard()
if sel and clipboard.supportsSelection():
mode = QClipboard.Selection
target = "Primary selection"
else:
mode = QClipboard.Clipboard
target = "Clipboard"
text = clipboard.text(mode)
if not text:
raise cmdexc.CommandError("{} is empty.".format(target))
log.misc.debug("{} contained: '{}'".format(target, text))
try:
url = urlutils.fuzzy_url(text)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count')
def tab_focus(self, index: {'type': (int, 'last')}=None, count=None):
"""Select the tab given as argument/[count].
If neither count nor index is given, it behaves like tab-next.
Args:
index: The tab index to focus, starting with 1. The special value
`last` focuses the last focused tab.
count: The tab index to focus, starting with 1.
"""
if index == 'last':
self._tab_focus_last()
return
if index is None and count is None:
self.tab_next()
return
try:
idx = cmdutils.arg_or_count(index, count, default=1,
countzero=self._count())
except ValueError as e:
raise cmdexc.CommandError(e)
cmdutils.check_overflow(idx + 1, 'int')
if 1 <= idx <= self._count():
self._set_current_index(idx - 1)
else:
raise cmdexc.CommandError("There's no tab with index {}!".format(
idx))
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count')
def tab_move(self, direction: {'type': ('+', '-')}=None, count=None):
"""Move the current tab.
Args:
direction: `+` or `-` for relative moving, not given for absolute
moving.
count: If moving absolutely: New position (default: 0)
If moving relatively: Offset.
"""
if direction is None:
new_idx = self._tab_move_absolute(count)
elif direction in '+-':
try:
new_idx = self._tab_move_relative(direction, count)
except ValueError:
raise cmdexc.CommandError("Count must be given for relative "
"moving!")
else:
raise cmdexc.CommandError("Invalid direction '{}'!".format(
direction))
if not 0 <= new_idx < self._count():
raise cmdexc.CommandError("Can't move tab to position {}!".format(
new_idx))
tab = self._current_widget()
cur_idx = self._current_index()
icon = self._tabbed_browser.tabIcon(cur_idx)
label = self._tabbed_browser.page_title(cur_idx)
cmdutils.check_overflow(cur_idx, 'int')
cmdutils.check_overflow(new_idx, 'int')
self._tabbed_browser.setUpdatesEnabled(False)
try:
self._tabbed_browser.removeTab(cur_idx)
self._tabbed_browser.insertTab(new_idx, tab, icon, label)
self._set_current_index(new_idx)
finally:
self._tabbed_browser.setUpdatesEnabled(True)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
def spawn(self, cmdline, userscript=False, verbose=False, detach=False):
"""Spawn a command in a shell.
Note that the {url} variable, which gets replaced by the current URL, might be
useful here.
Args:
userscript: Run the command as a userscript. Either store the
userscript in `~/.local/share/qutebrowser/userscripts`
(or `$XDG_DATA_DIR`), or use an absolute path.
verbose: Show notifications when the command started/exited.
detach: Whether the command should be detached from qutebrowser.
cmdline: The commandline to execute.
"""
try:
cmd, *args = shlex.split(cmdline)
except ValueError as e:
raise cmdexc.CommandError("Error while splitting command: "
"{}".format(e))
args = runners.replace_variables(self._win_id, args)
log.procs.debug("Executing {} with args {}, userscript={}".format(
cmd, args, userscript))
if userscript:
# ~ expansion is handled by the userscript module.
self.run_userscript(cmd, *args, verbose=verbose)
else:
cmd = os.path.expanduser(cmd)
proc = guiprocess.GUIProcess(self._win_id, what='command',
verbose=verbose,
parent=self._tabbed_browser)
if detach:
proc.start_detached(cmd, args)
else:
proc.start(cmd, args)
@cmdutils.register(instance='command-dispatcher', scope='window')
def home(self):
"""Open main startpage in current tab."""
self.openurl(config.get('general', 'startpage')[0])
@cmdutils.register(instance='command-dispatcher', scope='window',
deprecated='Use :spawn --userscript instead!')
def run_userscript(self, cmd, *args: {'nargs': '*'}, verbose=False):
"""Run a userscript given as argument.
Args:
cmd: The userscript to run.
args: Arguments to pass to the userscript.
verbose: Show notifications when the command started/exited.
"""
env = {
'QUTE_MODE': 'command',
}
idx = self._current_index()
if idx != -1:
env['QUTE_TITLE'] = self._tabbed_browser.page_title(idx)
webview = self._tabbed_browser.currentWidget()
if webview is None:
mainframe = None
else:
if webview.hasSelection():
env['QUTE_SELECTED_TEXT'] = webview.selectedText()
env['QUTE_SELECTED_HTML'] = webview.selectedHtml()
mainframe = webview.page().mainFrame()
try:
url = self._tabbed_browser.current_url()
except qtutils.QtValueError:
pass
else:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
env.update(userscripts.store_source(mainframe))
userscripts.run(cmd, *args, win_id=self._win_id, env=env,
verbose=verbose)
@cmdutils.register(instance='command-dispatcher', scope='window')
def quickmark_save(self):
"""Save the current page as a quickmark."""
quickmark_manager = objreg.get('quickmark-manager')
quickmark_manager.prompt_save(self._win_id, self._current_url())
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0,
completion=[usertypes.Completion.quickmark_by_name])
def quickmark_load(self, name, tab=False, bg=False, window=False):
"""Load a quickmark.
Args:
name: The name of the quickmark to load.
tab: Load the quickmark in a new tab.
bg: Load the quickmark in a new background tab.
window: Load the quickmark in a new window.
"""
try:
url = objreg.get('quickmark-manager').get(name)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def bookmark_add(self):
"""Save the current page as a bookmark."""
bookmark_manager = objreg.get('bookmark-manager')
url = self._current_url()
try:
bookmark_manager.add(url, self._current_title())
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
else:
message.info(self._win_id,
"Bookmarked {}!".format(url.toDisplayString()))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0,
completion=[usertypes.Completion.bookmark_by_url])
def bookmark_load(self, url, tab=False, bg=False, window=False):
"""Load a bookmark.
Args:
url: The url of the bookmark to load.
tab: Load the bookmark in a new tab.
bg: Load the bookmark in a new background tab.
window: Load the bookmark in a new window.
"""
try:
url = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window')
def follow_selected(self, tab=False):
"""Follow the selected text.
Args:
tab: Load the selected link in a new tab.
"""
widget = self._current_widget()
page = widget.page()
if not page.hasSelection():
return
if QWebSettings.globalSettings().testAttribute(
QWebSettings.JavascriptEnabled):
if tab:
page.open_target = usertypes.ClickTarget.tab
page.currentFrame().evaluateJavaScript(
'window.getSelection().anchorNode.parentNode.click()')
else:
try:
selected_element = xml.etree.ElementTree.fromstring(
'<html>' + widget.selectedHtml() + '</html>').find('a')
except xml.etree.ElementTree.ParseError:
raise cmdexc.CommandError('Could not parse selected element!')
if selected_element is not None:
try:
url = selected_element.attrib['href']
except KeyError:
raise cmdexc.CommandError('Anchor element without href!')
url = self._current_url().resolved(QUrl(url))
self._open(url, tab)
@cmdutils.register(instance='command-dispatcher', name='inspector',
scope='window')
def toggle_inspector(self):
"""Toggle the web inspector.
Note: Due a bug in Qt, the inspector will show incorrect request
headers in the network tab.
"""
cur = self._current_widget()
if cur.inspector is None:
if not config.get('general', 'developer-extras'):
raise cmdexc.CommandError(
"Please enable developer-extras before using the "
"webinspector!")
cur.inspector = inspector.WebInspector()
cur.inspector.setPage(cur.page())
cur.inspector.show()
elif cur.inspector.isVisible():
cur.inspector.hide()
else:
if not config.get('general', 'developer-extras'):
raise cmdexc.CommandError(
"Please enable developer-extras before using the "
"webinspector!")
else:
cur.inspector.show()
@cmdutils.register(instance='command-dispatcher', scope='window')
def download(self, url=None, dest=None):
"""Download a given URL, or current page if no URL given.
Args:
url: The URL to download. If not given, download the current page.
dest: The file path to write the download to, or None to ask.
"""
download_manager = objreg.get('download-manager', scope='window',
window=self._win_id)
if url:
url = urlutils.qurl_from_user_input(url)
urlutils.raise_cmdexc_if_invalid(url)
download_manager.get(url, filename=dest)
else:
page = self._current_widget().page()
download_manager.get(self._current_url(), page=page)
@cmdutils.register(instance='command-dispatcher', scope='window',
deprecated="Use :download instead.")
def download_page(self):
"""Download the current page."""
self.download()
@cmdutils.register(instance='command-dispatcher', scope='window')
def view_source(self):
"""Show the source of the current page."""
# pylint: disable=no-member
# https://bitbucket.org/logilab/pylint/issue/491/
widget = self._current_widget()
if widget.viewing_source:
raise cmdexc.CommandError("Already viewing source!")
frame = widget.page().currentFrame()
html = frame.toHtml()
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(
full=True, linenos='table')
highlighted = pygments.highlight(html, lexer, formatter)
current_url = self._current_url()
tab = self._tabbed_browser.tabopen(explicit=True)
tab.setHtml(highlighted, current_url)
tab.viewing_source = True
@cmdutils.register(instance='command-dispatcher', name='help',
completion=[usertypes.Completion.helptopic],
scope='window')
def show_help(self, tab=False, bg=False, window=False, topic=None):
r"""Show help about a command or setting.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
topic: The topic to show help for.
- :__command__ for commands.
- __section__\->__option__ for settings.
"""
if topic is None:
path = 'index.html'
elif topic.startswith(':'):
command = topic[1:]
if command not in cmdutils.cmd_dict:
raise cmdexc.CommandError("Invalid command {}!".format(
command))
path = 'commands.html#{}'.format(command)
elif '->' in topic:
parts = topic.split('->')
if len(parts) != 2:
raise cmdexc.CommandError("Invalid help topic {}!".format(
topic))
try:
config.get(*parts)
except configexc.NoSectionError:
raise cmdexc.CommandError("Invalid section {}!".format(
parts[0]))
except configexc.NoOptionError:
raise cmdexc.CommandError("Invalid option {}!".format(
parts[1]))
path = 'settings.html#{}'.format(topic.replace('->', '-'))
else:
raise cmdexc.CommandError("Invalid help topic {}!".format(topic))
url = QUrl('qute://help/{}'.format(path))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher',
modes=[KeyMode.insert], hide=True, scope='window')
def open_editor(self):
"""Open an external editor with the currently selected form field.
The editor which should be launched can be configured via the
`general -> editor` config option.
"""
frame = self._current_widget().page().currentFrame()
try:
elem = webelem.focus_elem(frame)
except webelem.IsNullError:
raise cmdexc.CommandError("No element focused!")
if not elem.is_editable(strict=True):
raise cmdexc.CommandError("Focused element is not editable!")
if elem.is_content_editable():
text = str(elem)
else:
text = elem.evaluateJavaScript('this.value')
self._editor = editor.ExternalEditor(
self._win_id, self._tabbed_browser)
self._editor.editing_finished.connect(
functools.partial(self.on_editing_finished, elem))
self._editor.edit(text)
def on_editing_finished(self, elem, text):
"""Write the editor text into the form field and clean up tempfile.
Callback for GUIProcess when the editor was closed.
Args:
elem: The WebElementWrapper which was modified.
text: The new text to insert.
"""
try:
if elem.is_content_editable():
log.misc.debug("Filling element {} via setPlainText.".format(
elem.debug_text()))
elem.setPlainText(text)
else:
log.misc.debug("Filling element {} via javascript.".format(
elem.debug_text()))
text = webelem.javascript_escape(text)
elem.evaluateJavaScript("this.value='{}'".format(text))
except webelem.IsNullError:
raise cmdexc.CommandError("Element vanished while editing!")
def _clear_search(self, view, text):
"""Clear search string/highlights for the given view.
This does nothing if the view's search text is the same as the given
text.
"""
if view.search_text is not None and view.search_text != text:
# We first clear the marked text, then the highlights
view.search('', 0)
view.search('', QWebPage.HighlightAllOccurrences)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
def search(self, text="", reverse=False):
"""Search for a text on the current page. With no text, clear results.
Args:
text: The text to search for.
reverse: Reverse search direction.
"""
view = self._current_widget()
self._clear_search(view, text)
flags = 0
ignore_case = config.get('general', 'ignore-case')
if ignore_case == 'smart':
if not text.islower():
flags |= QWebPage.FindCaseSensitively
elif not ignore_case:
flags |= QWebPage.FindCaseSensitively
if config.get('general', 'wrap-search'):
flags |= QWebPage.FindWrapsAroundDocument
if reverse:
flags |= QWebPage.FindBackward
# We actually search *twice* - once to highlight everything, then again
# to get a mark so we can navigate.
view.search(text, flags)
view.search(text, flags | QWebPage.HighlightAllOccurrences)
view.search_text = text
view.search_flags = flags
self._tabbed_browser.search_text = text
self._tabbed_browser.search_flags = flags
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window', count='count')
def search_next(self, count=1):
"""Continue the search to the ([count]th) next term.
Args:
count: How many elements to ignore.
"""
view = self._current_widget()
self._clear_search(view, self._tabbed_browser.search_text)
if self._tabbed_browser.search_text is not None:
view.search_text = self._tabbed_browser.search_text
view.search_flags = self._tabbed_browser.search_flags
view.search(view.search_text,
view.search_flags | QWebPage.HighlightAllOccurrences)
for _ in range(count):
view.search(view.search_text, view.search_flags)
@cmdutils.register(instance='command-dispatcher', hide=True,
scope='window', count='count')
def search_prev(self, count=1):
"""Continue the search to the ([count]th) previous term.
Args:
count: How many elements to ignore.
"""
view = self._current_widget()
self._clear_search(view, self._tabbed_browser.search_text)
if self._tabbed_browser.search_text is not None:
view.search_text = self._tabbed_browser.search_text
view.search_flags = self._tabbed_browser.search_flags
view.search(view.search_text,
view.search_flags | QWebPage.HighlightAllOccurrences)
# The int() here serves as a QFlags constructor to create a copy of the
# QFlags instance rather than as a reference. I don't know why it works this
# way, but it does.
flags = int(view.search_flags)
if flags & QWebPage.FindBackward:
flags &= ~QWebPage.FindBackward
else:
flags |= QWebPage.FindBackward
for _ in range(count):
view.search(view.search_text, flags)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window', count='count')
def move_to_next_line(self, count=1):
"""Move the cursor or selection to the next line.
Args:
count: How many lines to move.
"""
webview = self._current_widget()
if not webview.selection_enabled:
act = QWebPage.MoveToNextLine
else:
act = QWebPage.SelectNextLine
for _ in range(count):
webview.triggerPageAction(act)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window', count='count')
def move_to_prev_line(self, count=1):
"""Move the cursor or selection to the prev line.
Args:
count: How many lines to move.
"""
webview = self._current_widget()
if not webview.selection_enabled:
act = QWebPage.MoveToPreviousLine
else:
act = QWebPage.SelectPreviousLine
for _ in range(count):
webview.triggerPageAction(act)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window', count='count')
def move_to_next_char(self, count=1):
"""Move the cursor or selection to the next char.
Args:
count: How many chars to move.
"""
webview = self._current_widget()
if not webview.selection_enabled:
act = QWebPage.MoveToNextChar
else:
act = QWebPage.SelectNextChar
for _ in range(count):
webview.triggerPageAction(act)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window', count='count')
def move_to_prev_char(self, count=1):
"""Move the cursor or selection to the previous char.
Args:
count: How many chars to move.
"""
webview = self._current_widget()
if not webview.selection_enabled:
act = QWebPage.MoveToPreviousChar
else:
act = QWebPage.SelectPreviousChar
for _ in range(count):
webview.triggerPageAction(act)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window', count='count')
def move_to_end_of_word(self, count=1):
"""Move the cursor or selection to the end of the word.
Args:
count: How many words to move.
"""
webview = self._current_widget()
if not webview.selection_enabled:
act = QWebPage.MoveToNextWord
else:
act = QWebPage.SelectNextWord
for _ in range(count):
webview.triggerPageAction(act)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window', count='count')
def move_to_next_word(self, count=1):
"""Move the cursor or selection to the next word.
Args:
count: How many words to move.
"""
webview = self._current_widget()
if not webview.selection_enabled:
act = [QWebPage.MoveToNextWord, QWebPage.MoveToNextChar]
else:
act = [QWebPage.SelectNextWord, QWebPage.SelectNextChar]
for _ in range(count):
for a in act:
webview.triggerPageAction(a)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window', count='count')
def move_to_prev_word(self, count=1):
"""Move the cursor or selection to the previous word.
Args:
count: How many words to move.
"""
webview = self._current_widget()
if not webview.selection_enabled:
act = QWebPage.MoveToPreviousWord
else:
act = QWebPage.SelectPreviousWord
for _ in range(count):
webview.triggerPageAction(act)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_start_of_line(self):
"""Move the cursor or selection to the start of the line."""
webview = self._current_widget()
if not webview.selection_enabled:
act = QWebPage.MoveToStartOfLine
else:
act = QWebPage.SelectStartOfLine
webview.triggerPageAction(act)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_end_of_line(self):
"""Move the cursor or selection to the end of line."""
webview = self._current_widget()
if not webview.selection_enabled:
act = QWebPage.MoveToEndOfLine
else:
act = QWebPage.SelectEndOfLine
webview.triggerPageAction(act)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window', count='count')
def move_to_start_of_next_block(self, count=1):
"""Move the cursor or selection to the start of next block.
Args:
count: How many blocks to move.
"""
webview = self._current_widget()
if not webview.selection_enabled:
act = [QWebPage.MoveToEndOfBlock, QWebPage.MoveToNextLine,
QWebPage.MoveToStartOfBlock]
else:
act = [QWebPage.SelectEndOfBlock, QWebPage.SelectNextLine,
QWebPage.SelectStartOfBlock]
for _ in range(count):
for a in act:
webview.triggerPageAction(a)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window', count='count')
def move_to_start_of_prev_block(self, count=1):
"""Move the cursor or selection to the start of previous block.
Args:
count: How many blocks to move.
"""
webview = self._current_widget()
if not webview.selection_enabled:
act = [QWebPage.MoveToStartOfBlock, QWebPage.MoveToPreviousLine,
QWebPage.MoveToStartOfBlock]
else:
act = [QWebPage.SelectStartOfBlock, QWebPage.SelectPreviousLine,
QWebPage.SelectStartOfBlock]
for _ in range(count):
for a in act:
webview.triggerPageAction(a)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window', count='count')
def move_to_end_of_next_block(self, count=1):
"""Move the cursor or selection to the end of next block.
Args:
count: How many blocks to move.
"""
webview = self._current_widget()
if not webview.selection_enabled:
act = [QWebPage.MoveToEndOfBlock, QWebPage.MoveToNextLine,
QWebPage.MoveToEndOfBlock]
else:
act = [QWebPage.SelectEndOfBlock, QWebPage.SelectNextLine,
QWebPage.SelectEndOfBlock]
for _ in range(count):
for a in act:
webview.triggerPageAction(a)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window', count='count')
def move_to_end_of_prev_block(self, count=1):
"""Move the cursor or selection to the end of previous block.
Args:
count: How many blocks to move.
"""
webview = self._current_widget()
if not webview.selection_enabled:
act = [QWebPage.MoveToStartOfBlock, QWebPage.MoveToPreviousLine,
QWebPage.MoveToEndOfBlock]
else:
act = [QWebPage.SelectStartOfBlock, QWebPage.SelectPreviousLine,
QWebPage.SelectEndOfBlock]
for _ in range(count):
for a in act:
webview.triggerPageAction(a)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_start_of_document(self):
"""Move the cursor or selection to the start of the document."""
webview = self._current_widget()
if not webview.selection_enabled:
act = QWebPage.MoveToStartOfDocument
else:
act = QWebPage.SelectStartOfDocument
webview.triggerPageAction(act)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def move_to_end_of_document(self):
"""Move the cursor or selection to the end of the document."""
webview = self._current_widget()
if not webview.selection_enabled:
act = QWebPage.MoveToEndOfDocument
else:
act = QWebPage.SelectEndOfDocument
webview.triggerPageAction(act)
@cmdutils.register(instance='command-dispatcher', scope='window')
def yank_selected(self, sel=False, keep=False):
"""Yank the selected text to the clipboard or primary selection.
Args:
sel: Use the primary selection instead of the clipboard.
keep: If given, stay in visual mode after yanking.
"""
s = self._current_widget().selectedText()
if not self._current_widget().hasSelection() or len(s) == 0:
message.info(self._win_id, "Nothing to yank")
return
clipboard = QApplication.clipboard()
if sel and clipboard.supportsSelection():
mode = QClipboard.Selection
target = "primary selection"
else:
mode = QClipboard.Clipboard
target = "clipboard"
log.misc.debug("Yanking to {}: '{}'".format(target, s))
clipboard.setText(s, mode)
message.info(self._win_id, "{} {} yanked to {}".format(
len(s), "char" if len(s) == 1 else "chars", target))
if not keep:
modeman.maybe_leave(self._win_id, KeyMode.caret, "yank selected")
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def toggle_selection(self):
"""Toggle caret selection mode."""
widget = self._current_widget()
widget.selection_enabled = not widget.selection_enabled
mainwindow = objreg.get('main-window', scope='window',
window=self._win_id)
mainwindow.status.set_mode_active(usertypes.KeyMode.caret, True)
@cmdutils.register(instance='command-dispatcher', hide=True,
modes=[KeyMode.caret], scope='window')
def drop_selection(self):
"""Drop selection and keep selection mode enabled."""
self._current_widget().triggerPageAction(QWebPage.MoveToNextChar)
@cmdutils.register(instance='command-dispatcher', scope='window',
count='count', debug=True)
def debug_webaction(self, action, count=1):
"""Execute a webaction.
See http://doc.qt.io/qt-5/qwebpage.html#WebAction-enum for the
available actions.
Args:
action: The action to execute, e.g. MoveToNextChar.
count: How many times to repeat the action.
"""
member = getattr(QWebPage, action, None)
if not isinstance(member, QWebPage.WebAction):
raise cmdexc.CommandError("{} is not a valid web action!".format(
action))
view = self._current_widget()
for _ in range(count):
view.triggerPageAction(member)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_cmd_split=True)
def jseval(self, js_code, quiet=False):
"""Evaluate a JavaScript string.
Args:
js_code: The string to evaluate.
quiet: Don't show resulting JS object.
"""
frame = self._current_widget().page().mainFrame()
out = frame.evaluateJavaScript(js_code)
if quiet:
return
if out is None:
# Getting the actual error (if any) seems to be difficult. The
# error does end up in BrowserPage.javaScriptConsoleMessage(), but
# distinguishing between :jseval errors and errors from the webpage
# is not trivial...
message.info(self._win_id, 'No output or error')
else:
# The output can be a string, number, dict, array, etc. But *don't*
# output too much data, as this will make qutebrowser hang
out = str(out)
if len(out) > 5000:
message.info(self._win_id, out[:5000] + ' [...trimmed...]')
else:
message.info(self._win_id, out)
| 1 | 13,521 | Please make `_path_suggestion` public (i.e. remove the `_`) in `downloads.py`. | qutebrowser-qutebrowser | py |
@@ -40,6 +40,8 @@ func init() {
flag.StringVar(&revoke, "revoke", "", "Hostname for which to revoke the certificate")
flag.StringVar(&serverType, "type", "http", "Type of server to run")
flag.BoolVar(&version, "version", false, "Show version")
+ flag.StringVar(&caddytls.HTTPChallengePort, "external-port-80-is-local-port", "80", "Local port exposed externally as port 80")
+ flag.StringVar(&caddytls.TLSSNIChallengePort, "external-port-443-is-local-port", "443", "Local port exposed externally as port 443")
caddy.RegisterCaddyfileLoader("flag", caddy.LoaderFunc(confLoader))
caddy.SetDefaultCaddyfileLoader("default", caddy.LoaderFunc(defaultLoader)) | 1 | package caddymain
import (
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"gopkg.in/natefinch/lumberjack.v2"
"github.com/xenolf/lego/acme"
"github.com/mholt/caddy"
// plug in the HTTP server type
_ "github.com/mholt/caddy/caddyhttp"
"github.com/mholt/caddy/caddytls"
// This is where other plugins get plugged in (imported)
)
func init() {
caddy.TrapSignals()
setVersion()
flag.BoolVar(&caddytls.Agreed, "agree", false, "Agree to the CA's Subscriber Agreement")
flag.StringVar(&caddytls.DefaultCAUrl, "ca", "https://acme-v01.api.letsencrypt.org/directory", "URL to certificate authority's ACME server directory")
flag.StringVar(&conf, "conf", "", "Caddyfile to load (default \""+caddy.DefaultConfigFile+"\")")
flag.StringVar(&cpu, "cpu", "100%", "CPU cap")
flag.BoolVar(&plugins, "plugins", false, "List installed plugins")
flag.StringVar(&caddytls.DefaultEmail, "email", "", "Default ACME CA account email address")
flag.StringVar(&logfile, "log", "", "Process log file")
flag.StringVar(&caddy.PidFile, "pidfile", "", "Path to write pid file")
flag.BoolVar(&caddy.Quiet, "quiet", false, "Quiet mode (no initialization output)")
flag.StringVar(&revoke, "revoke", "", "Hostname for which to revoke the certificate")
flag.StringVar(&serverType, "type", "http", "Type of server to run")
flag.BoolVar(&version, "version", false, "Show version")
caddy.RegisterCaddyfileLoader("flag", caddy.LoaderFunc(confLoader))
caddy.SetDefaultCaddyfileLoader("default", caddy.LoaderFunc(defaultLoader))
}
// Run is Caddy's main() function.
func Run() {
flag.Parse()
caddy.AppName = appName
caddy.AppVersion = appVersion
acme.UserAgent = appName + "/" + appVersion
// Set up process log before anything bad happens
switch logfile {
case "stdout":
log.SetOutput(os.Stdout)
case "stderr":
log.SetOutput(os.Stderr)
case "":
log.SetOutput(ioutil.Discard)
default:
log.SetOutput(&lumberjack.Logger{
Filename: logfile,
MaxSize: 100,
MaxAge: 14,
MaxBackups: 10,
})
}
// Check for one-time actions
if revoke != "" {
err := caddytls.Revoke(revoke)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Revoked certificate for %s\n", revoke)
os.Exit(0)
}
if version {
fmt.Printf("%s %s\n", appName, appVersion)
if devBuild && gitShortStat != "" {
fmt.Printf("%s\n%s\n", gitShortStat, gitFilesModified)
}
os.Exit(0)
}
if plugins {
fmt.Println(caddy.DescribePlugins())
os.Exit(0)
}
moveStorage() // TODO: This is temporary for the 0.9 release, or until most users upgrade to 0.9+
// Set CPU cap
err := setCPU(cpu)
if err != nil {
mustLogFatal(err)
}
// Get Caddyfile input
caddyfile, err := caddy.LoadCaddyfile(serverType)
if err != nil {
mustLogFatal(err)
}
// Start your engines
instance, err := caddy.Start(caddyfile)
if err != nil {
mustLogFatal(err)
}
// Twiddle your thumbs
instance.Wait()
}
// mustLogFatal wraps log.Fatal() in a way that ensures the
// output is always printed to stderr so the user can see it
// if the user is still there, even if the process log was not
// enabled. If this process is an upgrade, however, and the user
// might not be there anymore, this just logs to the process
// log and exits.
func mustLogFatal(args ...interface{}) {
if !caddy.IsUpgrade() {
log.SetOutput(os.Stderr)
}
log.Fatal(args...)
}
// confLoader loads the Caddyfile using the -conf flag.
func confLoader(serverType string) (caddy.Input, error) {
if conf == "" {
return nil, nil
}
if conf == "stdin" {
return caddy.CaddyfileFromPipe(os.Stdin)
}
contents, err := ioutil.ReadFile(conf)
if err != nil {
return nil, err
}
return caddy.CaddyfileInput{
Contents: contents,
Filepath: conf,
ServerTypeName: serverType,
}, nil
}
// defaultLoader loads the Caddyfile from the current working directory.
func defaultLoader(serverType string) (caddy.Input, error) {
contents, err := ioutil.ReadFile(caddy.DefaultConfigFile)
if err != nil {
if os.IsNotExist(err) {
return nil, nil
}
return nil, err
}
return caddy.CaddyfileInput{
Contents: contents,
Filepath: caddy.DefaultConfigFile,
ServerTypeName: serverType,
}, nil
}
// moveStorage moves the old certificate storage location by
// renaming the "letsencrypt" folder to the hostname of the
// CA URL. This is TEMPORARY until most users have upgraded to 0.9+.
func moveStorage() {
oldPath := filepath.Join(caddy.AssetsPath(), "letsencrypt")
_, err := os.Stat(oldPath)
if os.IsNotExist(err) {
return
}
// Just use a default config to get default (file) storage
fileStorage, err := new(caddytls.Config).StorageFor(caddytls.DefaultCAUrl)
if err != nil {
log.Fatalf("[ERROR] Unable to get new path for certificate storage: %v", err)
}
newPath := string(fileStorage.(caddytls.FileStorage))
err = os.MkdirAll(string(newPath), 0700)
if err != nil {
log.Fatalf("[ERROR] Unable to make new certificate storage path: %v\n\nPlease follow instructions at:\nhttps://github.com/mholt/caddy/issues/902#issuecomment-228876011", err)
}
err = os.Rename(oldPath, string(newPath))
if err != nil {
log.Fatalf("[ERROR] Unable to migrate certificate storage: %v\n\nPlease follow instructions at:\nhttps://github.com/mholt/caddy/issues/902#issuecomment-228876011", err)
}
// convert mixed case folder and file names to lowercase
var done bool // walking is recursive and preloads the file names, so we must restart walk after a change until no changes
for !done {
done = true
filepath.Walk(string(newPath), func(path string, info os.FileInfo, err error) error {
// must be careful to only lowercase the base of the path, not the whole thing!!
base := filepath.Base(path)
if lowerBase := strings.ToLower(base); base != lowerBase {
lowerPath := filepath.Join(filepath.Dir(path), lowerBase)
err = os.Rename(path, lowerPath)
if err != nil {
log.Fatalf("[ERROR] Unable to lower-case: %v\n\nPlease follow instructions at:\nhttps://github.com/mholt/caddy/issues/902#issuecomment-228876011", err)
}
// terminate traversal and restart since Walk needs the updated file list with new file names
done = false
return errors.New("start over")
}
return nil
})
}
}
// setVersion figures out the version information
// based on variables set by -ldflags.
func setVersion() {
// A development build is one that's not at a tag or has uncommitted changes
devBuild = gitTag == "" || gitShortStat != ""
// Only set the appVersion if -ldflags was used
if gitNearestTag != "" || gitTag != "" {
if devBuild && gitNearestTag != "" {
appVersion = fmt.Sprintf("%s (+%s %s)",
strings.TrimPrefix(gitNearestTag, "v"), gitCommit, buildDate)
} else if gitTag != "" {
appVersion = strings.TrimPrefix(gitTag, "v")
}
}
}
// setCPU parses string cpu and sets GOMAXPROCS
// according to its value. It accepts either
// a number (e.g. 3) or a percent (e.g. 50%).
func setCPU(cpu string) error {
var numCPU int
availCPU := runtime.NumCPU()
if strings.HasSuffix(cpu, "%") {
// Percent
var percent float32
pctStr := cpu[:len(cpu)-1]
pctInt, err := strconv.Atoi(pctStr)
if err != nil || pctInt < 1 || pctInt > 100 {
return errors.New("invalid CPU value: percentage must be between 1-100")
}
percent = float32(pctInt) / 100
numCPU = int(float32(availCPU) * percent)
} else {
// Number
num, err := strconv.Atoi(cpu)
if err != nil || num < 1 {
return errors.New("invalid CPU value: provide a number or percent greater than 0")
}
numCPU = num
}
if numCPU > availCPU {
numCPU = availCPU
}
runtime.GOMAXPROCS(numCPU)
return nil
}
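// Illustration (not part of the original source): expected setCPU outcomes,
// assuming runtime.NumCPU() returns 8 on the host.
//
//   setCPU("3")    -> GOMAXPROCS(3)
//   setCPU("50%")  -> GOMAXPROCS(4)   // int(8 * 0.50)
//   setCPU("16")   -> GOMAXPROCS(8)   // capped at the available CPU count
//   setCPU("0")    -> error: provide a number or percent greater than 0
//   setCPU("150%") -> error: percentage must be between 1-100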
const appName = "Caddy"
// Flags that control program flow or startup
var (
serverType string
conf string
cpu string
logfile string
revoke string
version bool
plugins bool
)
// Build information obtained with the help of -ldflags
var (
appVersion = "(untracked dev build)" // inferred at startup
devBuild = true // inferred at startup
buildDate string // date -u
gitTag string // git describe --exact-match HEAD 2> /dev/null
gitNearestTag string // git describe --abbrev=0 --tags HEAD
gitCommit string // git rev-parse HEAD
gitShortStat string // git diff-index --shortstat
gitFilesModified string // git diff-index --name-only HEAD
)
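// Illustration (not part of the original source): a hypothetical build command that
// injects the values above; flag values and formats here are placeholders only.
//
//   go build -ldflags "-X main.gitTag=$(git describe --exact-match HEAD 2>/dev/null) \
//                      -X main.gitCommit=$(git rev-parse HEAD) \
//                      -X main.buildDate=$(date -u +%Y-%m-%dT%H:%M:%SZ)"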
| 1 | 8,929 | Woah, these flag names gotta get shorter. (Edit: I'll try to suggest some if needed, will think on it.) | caddyserver-caddy | go |
@@ -19,6 +19,14 @@ module Blacklight
end
end
end
+
+ initializer "blacklight.secret_key" do |app|
+ if app.respond_to?(:secrets)
+ Blacklight.secret_key ||= app.secrets.secret_key_base
+ elsif app.config.respond_to?(:secret_key_base)
+ Blacklight.secret_key ||= app.config.secret_key_base
+ end
+ end
end
-end
+end | 1 | module Blacklight
class Engine < Rails::Engine
engine_name "blacklight"
require 'bootstrap-sass'
require 'blacklight/rails/routes'
# BlacklightHelper is needed by all helpers, so we inject it
# into action view base here.
initializer 'blacklight.helpers' do |app|
ActionView::Base.send :include, BlacklightHelper
end
# This makes our rake tasks visible.
rake_tasks do
Dir.chdir(File.expand_path(File.join(File.dirname(__FILE__), '..'))) do
Dir.glob(File.join('railties', '*.rake')).each do |railtie|
load railtie
end
end
end
end
end | 1 | 5,291 | Is this the code that's supposed to use the Rails app's only when in Rails4? What's the point of the first `if app.respond_to?(:secrets)`, both the `if` and the `elsif` have the same body, is only the second one needed? If `app.config` has a `secret_key_base`, then use it, the end. Is there a need for first checking if `app.respond_to?(:secrets)`? | projectblacklight-blacklight | rb |
@@ -390,6 +390,9 @@ public:
m_nopayload_elems_bitmask =
enum_impl_strategy.getBitMaskForNoPayloadElements();
+ if (enum_decl->isObjC())
+ m_is_objc_enum = true;
+
LOG_PRINTF(LIBLLDB_LOG_TYPES, "m_nopayload_elems_bitmask = %s",
Dump(m_nopayload_elems_bitmask).c_str());
| 1 | //===-- SwiftASTContext.cpp -------------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "lldb/Symbol/SwiftASTContext.h"
// C++ Includes
#include <mutex> // std::once
#include <queue>
#include <set>
#include <sstream>
#include "swift/AST/ASTContext.h"
#include "swift/AST/ASTDemangler.h"
#include "swift/AST/ASTMangler.h"
#include "swift/AST/DebuggerClient.h"
#include "swift/AST/Decl.h"
#include "swift/AST/DiagnosticEngine.h"
#include "swift/AST/DiagnosticsSema.h"
#include "swift/AST/ExistentialLayout.h"
#include "swift/AST/GenericSignature.h"
#include "swift/AST/IRGenOptions.h"
#include "swift/AST/NameLookup.h"
#include "swift/AST/SearchPathOptions.h"
#include "swift/AST/SubstitutionMap.h"
#include "swift/AST/Type.h"
#include "swift/AST/Types.h"
#include "swift/ASTSectionImporter/ASTSectionImporter.h"
#include "swift/Basic/Dwarf.h"
#include "swift/Basic/LangOptions.h"
#include "swift/Basic/Platform.h"
#include "swift/Basic/PrimarySpecificPaths.h"
#include "swift/Basic/SourceManager.h"
#include "swift/ClangImporter/ClangImporter.h"
#include "swift/ClangImporter/ClangImporterOptions.h"
#include "swift/DWARFImporter/DWARFImporter.h"
#include "swift/Demangling/Demangle.h"
#include "swift/Demangling/ManglingMacros.h"
#include "swift/Driver/Util.h"
#include "swift/Frontend/Frontend.h"
#include "swift/Frontend/ParseableInterfaceModuleLoader.h"
#include "swift/Frontend/PrintingDiagnosticConsumer.h"
#include "swift/IDE/Utils.h"
#include "swift/IRGen/Linking.h"
#include "swift/SIL/SILModule.h"
#include "swift/Serialization/ModuleFile.h"
#include "swift/Serialization/Validation.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Driver/Driver.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "swift/../../lib/IRGen/FixedTypeInfo.h"
#include "swift/../../lib/IRGen/GenEnum.h"
#include "swift/../../lib/IRGen/GenHeap.h"
#include "swift/../../lib/IRGen/IRGenModule.h"
#include "swift/../../lib/IRGen/TypeInfo.h"
#include "swift/Serialization/SerializedModuleLoader.h"
#include "swift/Strings.h"
#include "Plugins/ExpressionParser/Clang/ClangHost.h"
#include "Plugins/ExpressionParser/Swift/SwiftDiagnostic.h"
#include "Plugins/ExpressionParser/Swift/SwiftUserExpression.h"
#include "lldb/Core/Debugger.h"
#include "lldb/Core/DumpDataExtractor.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/ModuleSpec.h"
#include "lldb/Core/PluginManager.h"
#include "lldb/Core/Section.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Core/ThreadSafeDenseMap.h"
#include "lldb/Expression/DiagnosticManager.h"
#include "lldb/Expression/IRExecutionUnit.h"
#include "lldb/Host/Host.h"
#include "lldb/Host/HostInfo.h"
#include "lldb/Host/StringConvert.h"
#include "lldb/Symbol/ClangASTContext.h"
#include "lldb/Symbol/CompileUnit.h"
#include "lldb/Symbol/ObjectFile.h"
#include "lldb/Symbol/SourceModule.h"
#include "lldb/Symbol/SymbolFile.h"
#include "lldb/Symbol/SymbolVendor.h"
#include "lldb/Symbol/VariableList.h"
#include "lldb/Target/Platform.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/SwiftLanguageRuntime.h"
#include "lldb/Target/Target.h"
#include "lldb/Utility/ArchSpec.h"
#include "lldb/Utility/CleanUp.h"
#include "lldb/Utility/FileSpec.h"
#include "lldb/Utility/LLDBAssert.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/Status.h"
#include "Plugins/Platform/MacOSX/PlatformDarwin.h"
#include "Plugins/SymbolFile/DWARF/DWARFASTParserSwift.h"
#define VALID_OR_RETURN(value) \
do { \
if (HasFatalErrors()) { \
return (value); \
} \
} while (0)
#define VALID_OR_RETURN_VOID() \
do { \
if (HasFatalErrors()) { \
return; \
} \
} while (0)
namespace {
/// This silly constexpr allows us to filter out the useless __FUNCTION__ name
/// of lambdas in the LOG_PRINTF macro.
bool constexpr IsLambda(const char *name) {
return name[0] && name[0] == 'o' && name[1] && name[1] == 'p' && name[2] &&
name[2] == 'e' && name[3] && name[3] == 'r' && name[4] &&
name[4] == 'a' && name[5] && name[5] == 't' && name[6] &&
name[6] == 'o' && name[7] && name[7] == 'r' && name[8] &&
name[8] == '(' && name[9] && name[9] == ')';
}
/// Used to serialize the log output.
std::recursive_mutex g_log_mutex;
} // namespace
/// Similar to LLDB_LOG, but with richer contextual information.
#define LOG_PRINTF(CHANNEL, FMT, ...) \
do { \
if (Log *log = lldb_private::GetLogIfAllCategoriesSet(CHANNEL)) { \
std::lock_guard<std::recursive_mutex> locker(g_log_mutex); \
/* The format string is optimized for code size, not speed. */ \
log->Printf("%s::%s%s" FMT, m_description.c_str(), \
IsLambda(__FUNCTION__) ? "" : __FUNCTION__, \
(FMT && FMT[0] == '(') ? "" : "() -- ", ##__VA_ARGS__); \
} \
} while (0)
using namespace lldb;
using namespace lldb_private;
typedef lldb_private::ThreadSafeDenseMap<swift::ASTContext *, SwiftASTContext *>
ThreadSafeSwiftASTMap;
static ThreadSafeSwiftASTMap &GetASTMap() {
// The global destructor list will tear down all of the modules when
// the LLDB shared library is being unloaded and this needs to live
// beyond all of those and not be destructed before they have all
// gone away. So we will leak this list intentionally so we can
// avoid global destructor problems.
static ThreadSafeSwiftASTMap *g_map_ptr = NULL;
static std::once_flag g_once_flag;
std::call_once(g_once_flag, []() {
// Intentional leak.
g_map_ptr = new ThreadSafeSwiftASTMap();
});
return *g_map_ptr;
}
class SwiftEnumDescriptor;
typedef std::shared_ptr<SwiftEnumDescriptor> SwiftEnumDescriptorSP;
typedef llvm::DenseMap<lldb::opaque_compiler_type_t, SwiftEnumDescriptorSP>
EnumInfoCache;
typedef std::shared_ptr<EnumInfoCache> EnumInfoCacheSP;
typedef llvm::DenseMap<const swift::ASTContext *, EnumInfoCacheSP>
ASTEnumInfoCacheMap;
static EnumInfoCache *GetEnumInfoCache(const swift::ASTContext *a) {
static ASTEnumInfoCacheMap g_cache;
static std::mutex g_mutex;
std::lock_guard<std::mutex> locker(g_mutex);
ASTEnumInfoCacheMap::iterator pos = g_cache.find(a);
if (pos == g_cache.end()) {
g_cache.insert(
std::make_pair(a, std::shared_ptr<EnumInfoCache>(new EnumInfoCache())));
return g_cache.find(a)->second.get();
}
return pos->second.get();
}
namespace {
bool IsDirectory(const FileSpec &spec) {
return llvm::sys::fs::is_directory(spec.GetPath());
}
bool IsRegularFile(const FileSpec &spec) {
return llvm::sys::fs::is_regular_file(spec.GetPath());
}
} // namespace
llvm::LLVMContext &SwiftASTContext::GetGlobalLLVMContext() {
static llvm::LLVMContext s_global_context;
return s_global_context;
}
llvm::ArrayRef<swift::VarDecl *>
SwiftASTContext::GetStoredProperties(swift::NominalTypeDecl *nominal) {
VALID_OR_RETURN(llvm::ArrayRef<swift::VarDecl *>());
// Check whether we already have the stored properties for this
// nominal type.
auto known = m_stored_properties.find(nominal);
if (known != m_stored_properties.end())
return known->second;
// Collect the stored properties from the AST and put them in the
// cache.
auto stored_properties = nominal->getStoredProperties();
auto &stored = m_stored_properties[nominal];
stored = std::vector<swift::VarDecl *>(stored_properties.begin(),
stored_properties.end());
return stored;
}
class SwiftEnumDescriptor {
public:
enum class Kind {
Empty, ///< No cases in this enum.
CStyle, ///< No cases have payloads.
AllPayload, ///< All cases have payloads.
Mixed, ///< Some cases have payloads.
Resilient ///< A resilient enum.
};
struct ElementInfo {
lldb_private::ConstString name;
CompilerType payload_type;
bool has_payload : 1;
bool is_indirect : 1;
};
Kind GetKind() const { return m_kind; }
ConstString GetTypeName() { return m_type_name; }
virtual ElementInfo *
GetElementFromData(const lldb_private::DataExtractor &data,
bool no_payload) = 0;
virtual size_t GetNumElements() {
return GetNumElementsWithPayload() + GetNumCStyleElements();
}
virtual size_t GetNumElementsWithPayload() = 0;
virtual size_t GetNumCStyleElements() = 0;
virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) = 0;
virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) = 0;
virtual ~SwiftEnumDescriptor() = default;
static SwiftEnumDescriptor *CreateDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl);
protected:
SwiftEnumDescriptor(swift::ASTContext *ast, swift::CanType swift_can_type,
swift::EnumDecl *enum_decl, SwiftEnumDescriptor::Kind k)
: m_kind(k), m_type_name() {
if (swift_can_type.getPointer()) {
if (auto nominal = swift_can_type->getAnyNominal()) {
swift::Identifier name(nominal->getName());
if (name.get())
m_type_name.SetCString(name.get());
}
}
}
private:
Kind m_kind;
ConstString m_type_name;
};
class SwiftEmptyEnumDescriptor : public SwiftEnumDescriptor {
public:
SwiftEmptyEnumDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl)
: SwiftEnumDescriptor(ast, swift_can_type, enum_decl,
SwiftEnumDescriptor::Kind::Empty) {}
virtual ElementInfo *
GetElementFromData(const lldb_private::DataExtractor &data, bool no_payload) {
return nullptr;
}
virtual size_t GetNumElementsWithPayload() { return 0; }
virtual size_t GetNumCStyleElements() { return 0; }
virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) {
return nullptr;
}
virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) {
return nullptr;
}
static bool classof(const SwiftEnumDescriptor *S) {
return S->GetKind() == SwiftEnumDescriptor::Kind::Empty;
}
virtual ~SwiftEmptyEnumDescriptor() = default;
};
namespace std {
template <> struct less<swift::ClusteredBitVector> {
bool operator()(const swift::ClusteredBitVector &lhs,
const swift::ClusteredBitVector &rhs) const {
int iL = lhs.size() - 1;
int iR = rhs.size() - 1;
for (; iL >= 0 && iR >= 0; --iL, --iR) {
bool bL = lhs[iL];
bool bR = rhs[iR];
if (bL and not bR)
return false;
if (bR and not bL)
return true;
}
return false;
}
};
} // namespace std
static std::string Dump(const swift::ClusteredBitVector &bit_vector) {
std::string buffer;
llvm::raw_string_ostream ostream(buffer);
for (size_t i = 0; i < bit_vector.size(); i++) {
if (bit_vector[i])
ostream << '1';
else
ostream << '0';
if ((i % 4) == 3)
ostream << ' ';
}
ostream.flush();
return buffer;
}
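// Illustration (not part of the original source): Dump() prints the vector bit by
// bit with a space after every fourth bit, so a 6-bit vector holding {1,0,1,1,0,0}
// (index 0 first) renders as "1011 00".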
class SwiftCStyleEnumDescriptor : public SwiftEnumDescriptor {
llvm::SmallString<32> m_description = {"SwiftCStyleEnumDescriptor"};
public:
SwiftCStyleEnumDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl)
: SwiftEnumDescriptor(ast, swift_can_type, enum_decl,
SwiftEnumDescriptor::Kind::CStyle),
m_nopayload_elems_bitmask(), m_elements(), m_element_indexes() {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "doing C-style enum layout for %s",
GetTypeName().AsCString());
SwiftASTContext *swift_ast_ctx = SwiftASTContext::GetSwiftASTContext(ast);
swift::irgen::IRGenModule &irgen_module = swift_ast_ctx->GetIRGenModule();
const swift::irgen::EnumImplStrategy &enum_impl_strategy =
swift::irgen::getEnumImplStrategy(irgen_module, swift_can_type);
llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element>
elements_with_no_payload =
enum_impl_strategy.getElementsWithNoPayload();
const bool has_payload = false;
const bool is_indirect = false;
uint64_t case_counter = 0;
m_nopayload_elems_bitmask =
enum_impl_strategy.getBitMaskForNoPayloadElements();
LOG_PRINTF(LIBLLDB_LOG_TYPES, "m_nopayload_elems_bitmask = %s",
Dump(m_nopayload_elems_bitmask).c_str());
for (auto enum_case : elements_with_no_payload) {
ConstString case_name(enum_case.decl->getName().str().data());
swift::ClusteredBitVector case_value =
enum_impl_strategy.getBitPatternForNoPayloadElement(enum_case.decl);
LOG_PRINTF(LIBLLDB_LOG_TYPES, "case_name = %s, unmasked value = %s",
case_name.AsCString(), Dump(case_value).c_str());
case_value &= m_nopayload_elems_bitmask;
LOG_PRINTF(LIBLLDB_LOG_TYPES, "case_name = %s, masked value = %s",
case_name.AsCString(), Dump(case_value).c_str());
std::unique_ptr<ElementInfo> elem_info(
new ElementInfo{case_name, CompilerType(), has_payload, is_indirect});
m_element_indexes.emplace(case_counter, elem_info.get());
case_counter++;
m_elements.emplace(case_value, std::move(elem_info));
}
}
virtual ElementInfo *
GetElementFromData(const lldb_private::DataExtractor &data, bool no_payload) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"C-style enum - inspecting data to find enum case for type %s",
GetTypeName().AsCString());
swift::ClusteredBitVector current_payload;
lldb::offset_t offset = 0;
for (size_t idx = 0; idx < data.GetByteSize(); idx++) {
uint64_t byte = data.GetU8(&offset);
current_payload.add(8, byte);
}
LOG_PRINTF(LIBLLDB_LOG_TYPES, "m_nopayload_elems_bitmask = %s",
Dump(m_nopayload_elems_bitmask).c_str());
LOG_PRINTF(LIBLLDB_LOG_TYPES, "current_payload = %s",
Dump(current_payload).c_str());
if (current_payload.size() != m_nopayload_elems_bitmask.size()) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"sizes don't match; getting out with an error");
return nullptr;
}
// A C-Like Enum is laid out as an integer tag with the minimal number of
// bits to contain all of the cases. The cases are assigned tag values in
// declaration order. e.g.
// enum Patatino { // => LLVM i1
// case X // => i1 0
// case Y // => i1 1
// }
// From this we can find out the number of bits really used for the payload.
current_payload &= m_nopayload_elems_bitmask;
auto elem_mask =
swift::ClusteredBitVector::getConstant(current_payload.size(), false);
int64_t bit_count = m_elements.size() - 1;
if (bit_count > 0 && no_payload) {
uint64_t bit_set = 0;
while (bit_count > 0) {
elem_mask.setBit(bit_set);
bit_set += 1;
bit_count /= 2;
}
current_payload &= elem_mask;
}
LOG_PRINTF(LIBLLDB_LOG_TYPES, "masked current_payload = %s",
Dump(current_payload).c_str());
auto iter = m_elements.find(current_payload), end = m_elements.end();
if (iter == end) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "bitmask search failed");
return nullptr;
}
LOG_PRINTF(LIBLLDB_LOG_TYPES, "bitmask search success - found case %s",
iter->second.get()->name.AsCString());
return iter->second.get();
}
virtual size_t GetNumElementsWithPayload() { return 0; }
virtual size_t GetNumCStyleElements() { return m_elements.size(); }
virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) {
return nullptr;
}
virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) {
if (idx >= m_element_indexes.size())
return nullptr;
return m_element_indexes[idx];
}
static bool classof(const SwiftEnumDescriptor *S) {
return S->GetKind() == SwiftEnumDescriptor::Kind::CStyle;
}
virtual ~SwiftCStyleEnumDescriptor() = default;
private:
swift::ClusteredBitVector m_nopayload_elems_bitmask;
std::map<swift::ClusteredBitVector, std::unique_ptr<ElementInfo>> m_elements;
std::map<uint64_t, ElementInfo *> m_element_indexes;
};
class SwiftAllPayloadEnumDescriptor : public SwiftEnumDescriptor {
llvm::SmallString<32> m_description = {"SwiftAllPayloadEnumDescriptor"};
public:
SwiftAllPayloadEnumDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl)
: SwiftEnumDescriptor(ast, swift_can_type, enum_decl,
SwiftEnumDescriptor::Kind::AllPayload) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "doing ADT-style enum layout for %s",
GetTypeName().AsCString());
SwiftASTContext *swift_ast_ctx = SwiftASTContext::GetSwiftASTContext(ast);
swift::irgen::IRGenModule &irgen_module = swift_ast_ctx->GetIRGenModule();
const swift::irgen::EnumImplStrategy &enum_impl_strategy =
swift::irgen::getEnumImplStrategy(irgen_module, swift_can_type);
llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element>
elements_with_payload = enum_impl_strategy.getElementsWithPayload();
m_tag_bits = enum_impl_strategy.getTagBitsForPayloads();
LOG_PRINTF(LIBLLDB_LOG_TYPES, "tag_bits = %s", Dump(m_tag_bits).c_str());
auto module_ctx = enum_decl->getModuleContext();
const bool has_payload = true;
for (auto enum_case : elements_with_payload) {
ConstString case_name(enum_case.decl->getName().str().data());
swift::EnumElementDecl *case_decl = enum_case.decl;
assert(case_decl);
auto arg_type = case_decl->getArgumentInterfaceType();
CompilerType case_type;
if (arg_type) {
case_type = {
swift_can_type->getTypeOfMember(module_ctx, case_decl, arg_type)
->getCanonicalType()
.getPointer()};
}
const bool is_indirect =
case_decl->isIndirect() || case_decl->getParentEnum()->isIndirect();
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"case_name = %s, type = %s, is_indirect = %s",
case_name.AsCString(), case_type.GetTypeName().AsCString(),
is_indirect ? "yes" : "no");
std::unique_ptr<ElementInfo> elem_info(
new ElementInfo{case_name, case_type, has_payload, is_indirect});
m_elements.push_back(std::move(elem_info));
}
}
virtual ElementInfo *
GetElementFromData(const lldb_private::DataExtractor &data, bool no_payload) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"ADT-style enum - inspecting data to find enum case for type %s",
GetTypeName().AsCString());
// No elements, just fail.
if (m_elements.size() == 0) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "enum with no cases. getting out");
return nullptr;
}
// One element, so it's got to be it.
if (m_elements.size() == 1) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"enum with one case. getting out easy with %s",
m_elements.front().get()->name.AsCString());
return m_elements.front().get();
}
swift::ClusteredBitVector current_payload;
lldb::offset_t offset = 0;
for (size_t idx = 0; idx < data.GetByteSize(); idx++) {
uint64_t byte = data.GetU8(&offset);
current_payload.add(8, byte);
}
LOG_PRINTF(LIBLLDB_LOG_TYPES, "tag_bits = %s",
Dump(m_tag_bits).c_str());
LOG_PRINTF(LIBLLDB_LOG_TYPES, "current_payload = %s",
Dump(current_payload).c_str());
if (current_payload.size() != m_tag_bits.size()) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"sizes don't match; getting out with an error");
return nullptr;
}
size_t discriminator = 0;
size_t power_of_2 = 1;
auto enumerator = m_tag_bits.enumerateSetBits();
for (llvm::Optional<size_t> next = enumerator.findNext(); next.hasValue();
next = enumerator.findNext()) {
discriminator =
discriminator + (current_payload[next.getValue()] ? power_of_2 : 0);
power_of_2 <<= 1;
}
// The discriminator is too large?
if (discriminator >= m_elements.size()) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"discriminator value of %" PRIu64 " too large, getting out",
(uint64_t)discriminator);
return nullptr;
} else {
auto ptr = m_elements[discriminator].get();
if (!ptr)
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"discriminator value of %" PRIu64
" acceptable, but null case matched - that's bad",
(uint64_t)discriminator);
else
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"discriminator value of %" PRIu64
" acceptable, case %s matched",
(uint64_t)discriminator, ptr->name.AsCString());
return ptr;
}
}
virtual size_t GetNumElementsWithPayload() { return m_elements.size(); }
virtual size_t GetNumCStyleElements() { return 0; }
virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) {
if (idx >= m_elements.size())
return nullptr;
return m_elements[idx].get();
}
virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) {
return nullptr;
}
static bool classof(const SwiftEnumDescriptor *S) {
return S->GetKind() == SwiftEnumDescriptor::Kind::AllPayload;
}
virtual ~SwiftAllPayloadEnumDescriptor() = default;
private:
swift::ClusteredBitVector m_tag_bits;
std::vector<std::unique_ptr<ElementInfo>> m_elements;
};
class SwiftMixedEnumDescriptor : public SwiftEnumDescriptor {
public:
SwiftMixedEnumDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl)
: SwiftEnumDescriptor(ast, swift_can_type, enum_decl,
SwiftEnumDescriptor::Kind::Mixed),
m_non_payload_cases(ast, swift_can_type, enum_decl),
m_payload_cases(ast, swift_can_type, enum_decl) {}
virtual ElementInfo *
GetElementFromData(const lldb_private::DataExtractor &data, bool no_payload) {
ElementInfo *elem_info =
m_non_payload_cases.GetElementFromData(data, false);
return elem_info ? elem_info
: m_payload_cases.GetElementFromData(data, false);
}
static bool classof(const SwiftEnumDescriptor *S) {
return S->GetKind() == SwiftEnumDescriptor::Kind::Mixed;
}
virtual size_t GetNumElementsWithPayload() {
return m_payload_cases.GetNumElementsWithPayload();
}
virtual size_t GetNumCStyleElements() {
return m_non_payload_cases.GetNumCStyleElements();
}
virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) {
return m_payload_cases.GetElementWithPayloadAtIndex(idx);
}
virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) {
return m_non_payload_cases.GetElementWithNoPayloadAtIndex(idx);
}
virtual ~SwiftMixedEnumDescriptor() = default;
private:
SwiftCStyleEnumDescriptor m_non_payload_cases;
SwiftAllPayloadEnumDescriptor m_payload_cases;
};
class SwiftResilientEnumDescriptor : public SwiftEnumDescriptor {
llvm::SmallString<32> m_description = {"SwiftResilientEnumDescriptor"};
public:
SwiftResilientEnumDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl)
: SwiftEnumDescriptor(ast, swift_can_type, enum_decl,
SwiftEnumDescriptor::Kind::Resilient) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "doing resilient enum layout for %s",
GetTypeName().AsCString());
}
virtual ElementInfo *
GetElementFromData(const lldb_private::DataExtractor &data, bool no_payload) {
// Not yet supported by LLDB.
return nullptr;
}
virtual size_t GetNumElementsWithPayload() { return 0; }
virtual size_t GetNumCStyleElements() { return 0; }
virtual ElementInfo *GetElementWithPayloadAtIndex(size_t idx) {
return nullptr;
}
virtual ElementInfo *GetElementWithNoPayloadAtIndex(size_t idx) {
return nullptr;
}
static bool classof(const SwiftEnumDescriptor *S) {
return S->GetKind() == SwiftEnumDescriptor::Kind::Resilient;
}
virtual ~SwiftResilientEnumDescriptor() = default;
};
SwiftEnumDescriptor *
SwiftEnumDescriptor::CreateDescriptor(swift::ASTContext *ast,
swift::CanType swift_can_type,
swift::EnumDecl *enum_decl) {
assert(ast);
assert(enum_decl);
assert(swift_can_type.getPointer());
SwiftASTContext *swift_ast_ctx = SwiftASTContext::GetSwiftASTContext(ast);
assert(swift_ast_ctx);
swift::irgen::IRGenModule &irgen_module = swift_ast_ctx->GetIRGenModule();
const swift::irgen::EnumImplStrategy &enum_impl_strategy =
swift::irgen::getEnumImplStrategy(irgen_module, swift_can_type);
llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element>
elements_with_payload = enum_impl_strategy.getElementsWithPayload();
llvm::ArrayRef<swift::irgen::EnumImplStrategy::Element>
elements_with_no_payload = enum_impl_strategy.getElementsWithNoPayload();
swift::SILType swift_sil_type = irgen_module.getLoweredType(swift_can_type);
if (!irgen_module.getTypeInfo(swift_sil_type).isFixedSize())
return new SwiftResilientEnumDescriptor(ast, swift_can_type, enum_decl);
if (elements_with_no_payload.size() == 0) {
  // No cases without a payload: empty enum, or all-payload enum?
if (elements_with_payload.size() == 0)
return new SwiftEmptyEnumDescriptor(ast, swift_can_type, enum_decl);
return new SwiftAllPayloadEnumDescriptor(ast, swift_can_type, enum_decl);
}
  // Some cases without a payload: mixed enum, or pure C-style enum?
if (elements_with_payload.size() == 0)
return new SwiftCStyleEnumDescriptor(ast, swift_can_type, enum_decl);
return new SwiftMixedEnumDescriptor(ast, swift_can_type, enum_decl);
}
static SwiftEnumDescriptor *
GetEnumInfoFromEnumDecl(swift::ASTContext *ast, swift::CanType swift_can_type,
swift::EnumDecl *enum_decl) {
return SwiftEnumDescriptor::CreateDescriptor(ast, swift_can_type, enum_decl);
}
SwiftEnumDescriptor *SwiftASTContext::GetCachedEnumInfo(void *type) {
VALID_OR_RETURN(nullptr);
if (type) {
EnumInfoCache *enum_info_cache = GetEnumInfoCache(GetASTContext());
EnumInfoCache::const_iterator pos = enum_info_cache->find(type);
if (pos != enum_info_cache->end())
return pos->second.get();
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
if (!SwiftASTContext::IsFullyRealized({swift_can_type}))
return nullptr;
SwiftEnumDescriptorSP enum_info_sp;
if (auto *enum_type = swift_can_type->getAs<swift::EnumType>()) {
enum_info_sp.reset(GetEnumInfoFromEnumDecl(
GetASTContext(), swift_can_type, enum_type->getDecl()));
} else if (auto *bound_enum_type =
swift_can_type->getAs<swift::BoundGenericEnumType>()) {
enum_info_sp.reset(GetEnumInfoFromEnumDecl(
GetASTContext(), swift_can_type, bound_enum_type->getDecl()));
}
if (enum_info_sp.get())
enum_info_cache->insert(std::make_pair(type, enum_info_sp));
return enum_info_sp.get();
}
return nullptr;
}
namespace {
static inline bool
SwiftASTContextSupportsLanguage(lldb::LanguageType language) {
return language == eLanguageTypeSwift;
}
static bool IsDeviceSupport(const char *path) {
// The old-style check, which we preserve for safety.
if (path && strstr(path, "iOS DeviceSupport"))
return true;
// The new-style check, which should cover more devices.
if (path)
if (const char *Developer_Xcode = strstr(path, "Developer"))
if (const char *DeviceSupport = strstr(Developer_Xcode, "DeviceSupport"))
if (strstr(DeviceSupport, "Symbols"))
return true;
// Don't look in the simulator runtime frameworks either. They
// either duplicate what the SDK has, or for older simulators
// conflict with them.
if (path && strstr(path, ".simruntime/Contents/Resources/"))
return true;
return false;
}
} // namespace
SwiftASTContext::SwiftASTContext(std::string description, llvm::Triple triple,
Target *target)
: TypeSystem(TypeSystem::eKindSwift),
m_compiler_invocation_ap(new swift::CompilerInvocation()),
m_description(description) {
// Set the clang modules cache path.
llvm::SmallString<128> path;
auto props = ModuleList::GetGlobalModuleListProperties();
props.GetClangModulesCachePath().GetPath(path);
m_compiler_invocation_ap->setClangModuleCachePath(path);
if (target)
m_target_wp = target->shared_from_this();
SetTriple(triple);
swift::IRGenOptions &ir_gen_opts =
m_compiler_invocation_ap->getIRGenOptions();
ir_gen_opts.OutputKind = swift::IRGenOutputKind::Module;
ir_gen_opts.UseJIT = true;
ir_gen_opts.DWARFVersion = swift::DWARFVersion;
}
SwiftASTContext::SwiftASTContext(const SwiftASTContext &rhs)
: TypeSystem(rhs.getKind()),
m_compiler_invocation_ap(new swift::CompilerInvocation()),
m_description(rhs.m_description) {
if (rhs.m_compiler_invocation_ap) {
SetTriple(rhs.GetTriple());
llvm::StringRef module_cache_path =
rhs.m_compiler_invocation_ap->getClangModuleCachePath();
m_compiler_invocation_ap->setClangModuleCachePath(module_cache_path);
}
swift::IRGenOptions &ir_gen_opts =
m_compiler_invocation_ap->getIRGenOptions();
ir_gen_opts.OutputKind = swift::IRGenOutputKind::Module;
ir_gen_opts.UseJIT = true;
TargetSP target_sp = rhs.m_target_wp.lock();
if (target_sp)
m_target_wp = target_sp;
m_platform_sdk_path = rhs.m_platform_sdk_path;
// Initialize search paths and clang importer options, we need
// them to grab a SwiftASTContext without asserting.
std::vector<std::string> module_search_paths;
std::vector<std::pair<std::string, bool>> framework_search_paths;
const auto &opts = rhs.m_compiler_invocation_ap->getSearchPathOptions();
module_search_paths.insert(module_search_paths.end(),
opts.ImportSearchPaths.begin(),
opts.ImportSearchPaths.end());
for (const auto &fwsp : opts.FrameworkSearchPaths)
framework_search_paths.push_back({fwsp.Path, fwsp.IsSystem});
InitializeSearchPathOptions(module_search_paths, framework_search_paths);
GetClangImporterOptions();
// As this is a copy constructor, make sure we copy the clang importer
// options from RHS to LHS.
GetCompilerInvocation().getClangImporterOptions() =
rhs.m_compiler_invocation_ap->getClangImporterOptions();
}
SwiftASTContext::~SwiftASTContext() {
if (swift::ASTContext *ctx = m_ast_context_ap.get()) {
// A RemoteASTContext associated with this swift::ASTContext has
// to be destroyed before the swift::ASTContext is destroyed.
if (TargetSP target_sp = m_target_wp.lock())
if (ProcessSP process_sp = target_sp->GetProcessSP())
if (auto *runtime = process_sp->GetSwiftLanguageRuntime())
runtime->ReleaseAssociatedRemoteASTContext(ctx);
GetASTMap().Erase(ctx);
}
}
ConstString SwiftASTContext::GetPluginNameStatic() {
return ConstString("swift");
}
ConstString SwiftASTContext::GetPluginName() {
return ClangASTContext::GetPluginNameStatic();
}
uint32_t SwiftASTContext::GetPluginVersion() { return 1; }
namespace {
enum SDKType : int {
MacOSX = 0,
iPhoneSimulator,
iPhoneOS,
AppleTVSimulator,
AppleTVOS,
WatchSimulator,
watchOS,
Linux,
numSDKTypes,
unknown = -1
};
const char *const sdk_strings[] = {
"macosx", "iphonesimulator", "iphoneos", "appletvsimulator",
"appletvos", "watchsimulator", "watchos", "linux"};
struct SDKTypeMinVersion {
SDKType sdk_type;
unsigned min_version_major;
unsigned min_version_minor;
};
} // namespace
/// Return the SDKType (+minimum version needed for Swift support) for
/// the target triple, if that makes sense. Otherwise, return the
/// unknown sdk type.
static SDKTypeMinVersion GetSDKType(const llvm::Triple &target,
const llvm::Triple &host) {
// Only Darwin platforms know the concept of an SDK.
auto host_os = host.getOS();
if (host_os != llvm::Triple::OSType::MacOSX)
return {SDKType::unknown, 0, 0};
auto is_simulator = [&]() -> bool {
return target.getEnvironment() == llvm::Triple::Simulator ||
!target.getArchName().startswith("arm");
};
switch (target.getOS()) {
case llvm::Triple::OSType::MacOSX:
case llvm::Triple::OSType::Darwin:
return {SDKType::MacOSX, 10, 10};
case llvm::Triple::OSType::IOS:
if (is_simulator())
return {SDKType::iPhoneSimulator, 8, 0};
return {SDKType::iPhoneOS, 8, 0};
case llvm::Triple::OSType::TvOS:
if (is_simulator())
return {SDKType::AppleTVSimulator, 9, 0};
return {SDKType::AppleTVOS, 9, 0};
case llvm::Triple::OSType::WatchOS:
if (is_simulator())
return {SDKType::WatchSimulator, 2, 0};
return {SDKType::watchOS, 2, 0};
default:
return {SDKType::unknown, 0, 0};
}
}
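// Illustration (not part of the original source): expected GetSDKType results on a
// macOS host, per the switch above (the triples are hypothetical examples).
//
//   target x86_64-apple-macosx          -> {SDKType::MacOSX, 10, 10}
//   target arm64-apple-ios              -> {SDKType::iPhoneOS, 8, 0}
//   target x86_64-apple-ios (simulator) -> {SDKType::iPhoneSimulator, 8, 0}
//   any target when the host is Linux   -> {SDKType::unknown, 0, 0}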
static StringRef GetXcodeContentsPath() {
static std::once_flag g_once_flag;
static std::string g_xcode_contents_path;
std::call_once(g_once_flag, [&]() {
const char substr[] = ".app/Contents/";
// First, try based on the current shlib's location.
if (FileSpec fspec = HostInfo::GetShlibDir()) {
std::string path_to_shlib = fspec.GetPath();
size_t pos = path_to_shlib.rfind(substr);
if (pos != std::string::npos) {
path_to_shlib.erase(pos + strlen(substr));
g_xcode_contents_path = path_to_shlib;
return;
}
}
// Fall back to using xcrun.
if (HostInfo::GetArchitecture().GetTriple().getOS() ==
llvm::Triple::MacOSX) {
int status = 0;
int signo = 0;
std::string output;
const char *command = "xcrun -sdk macosx --show-sdk-path";
lldb_private::Status error = Host::RunShellCommand(
command, // shell command to run
NULL, // current working directory
&status, // Put the exit status of the process in here
&signo, // Put the signal that caused the process to exit in here
&output, // Get the output from the command and place it in this
// string
std::chrono::seconds(
3)); // Timeout in seconds to wait for shell program to finish
if (status == 0 && !output.empty()) {
size_t first_non_newline = output.find_last_not_of("\r\n");
if (first_non_newline != std::string::npos) {
output.erase(first_non_newline + 1);
}
size_t pos = output.rfind(substr);
if (pos != std::string::npos) {
output.erase(pos + strlen(substr));
g_xcode_contents_path = output;
}
}
}
});
return g_xcode_contents_path;
}
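// Illustration (not part of the original source): if the LLDB shared library lived at
// /Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework (hypothetical path),
// the ".app/Contents/" search above would yield "/Applications/Xcode.app/Contents/".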
static std::string GetCurrentToolchainPath() {
const char substr[] = ".xctoolchain/";
{
if (FileSpec fspec = HostInfo::GetShlibDir()) {
std::string path_to_shlib = fspec.GetPath();
size_t pos = path_to_shlib.rfind(substr);
if (pos != std::string::npos) {
path_to_shlib.erase(pos + strlen(substr));
return path_to_shlib;
}
}
}
return {};
}
static std::string GetCurrentCLToolsPath() {
const char substr[] = "/CommandLineTools/";
{
if (FileSpec fspec = HostInfo::GetShlibDir()) {
std::string path_to_shlib = fspec.GetPath();
size_t pos = path_to_shlib.rfind(substr);
if (pos != std::string::npos) {
path_to_shlib.erase(pos + strlen(substr));
return path_to_shlib;
}
}
}
return {};
}
/// Return the name of the OS-specific subdirectory containing the
/// Swift stdlib needed for \p target.
StringRef SwiftASTContext::GetSwiftStdlibOSDir(const llvm::Triple &target,
const llvm::Triple &host) {
auto sdk = GetSDKType(target, host);
if (sdk.sdk_type != SDKType::unknown)
return sdk_strings[sdk.sdk_type];
return target.getOSName();
}
StringRef SwiftASTContext::GetResourceDir(const llvm::Triple &triple) {
static std::mutex g_mutex;
std::lock_guard<std::mutex> locker(g_mutex);
StringRef platform_sdk_path = GetPlatformSDKPath();
auto swift_stdlib_os_dir =
GetSwiftStdlibOSDir(triple, HostInfo::GetArchitecture().GetTriple());
// The resource dir depends on the SDK path and the expected os name.
llvm::SmallString<128> key(platform_sdk_path);
key.append(swift_stdlib_os_dir);
static llvm::StringMap<std::string> g_resource_dir_cache;
auto it = g_resource_dir_cache.find(key);
if (it != g_resource_dir_cache.end())
return it->getValue();
auto value =
GetResourceDir(platform_sdk_path, swift_stdlib_os_dir,
HostInfo::GetSwiftDir().GetPath(), GetXcodeContentsPath(),
GetCurrentToolchainPath(), GetCurrentCLToolsPath());
g_resource_dir_cache.insert({key, value});
return g_resource_dir_cache[key];
}
std::string SwiftASTContext::GetResourceDir(StringRef platform_sdk_path,
StringRef swift_stdlib_os_dir,
std::string swift_dir,
std::string xcode_contents_path,
std::string toolchain_path,
std::string cl_tools_path) {
llvm::SmallString<16> m_description("SwiftASTContext");
// First, check if there's something in our bundle.
{
FileSpec swift_dir_spec(swift_dir);
if (swift_dir_spec) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "trying ePathTypeSwiftDir: %s",
swift_dir_spec.GetCString());
// We can't just check for the Swift directory, because that
// always exists. We have to look for "clang" inside that.
FileSpec swift_clang_dir_spec = swift_dir_spec;
swift_clang_dir_spec.AppendPathComponent("clang");
if (IsDirectory(swift_clang_dir_spec)) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"found Swift resource dir via ePathTypeSwiftDir': %s",
swift_dir_spec.GetCString());
return swift_dir_spec.GetPath();
}
}
}
// Nothing in our bundle. Are we in a toolchain that has its own Swift
// compiler resource dir?
{
llvm::SmallString<256> path(toolchain_path);
LOG_PRINTF(LIBLLDB_LOG_TYPES, "trying toolchain path: %s", path.c_str());
if (!path.empty()) {
llvm::sys::path::append(path, "usr/lib/swift");
LOG_PRINTF(LIBLLDB_LOG_TYPES, "trying toolchain-based lib path: %s",
path.c_str());
if (IsDirectory(FileSpec(path))) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"found Swift resource dir via "
"toolchain path + 'usr/lib/swift': %s",
path.c_str());
return path.str();
}
}
}
// We're not in a toolchain that has one. Use the Xcode default toolchain.
{
llvm::SmallString<256> path(xcode_contents_path);
LOG_PRINTF(LIBLLDB_LOG_TYPES, "trying Xcode path: %s", path.c_str());
if (!path.empty()) {
llvm::sys::path::append(path, "Developer",
"Toolchains/XcodeDefault.xctoolchain",
"usr/lib/swift");
LOG_PRINTF(LIBLLDB_LOG_TYPES, "trying Xcode-based lib path: %s",
path.c_str());
if (IsDirectory(FileSpec(path))) {
StringRef resource_dir = path;
llvm::sys::path::append(path, swift_stdlib_os_dir);
std::string s = path.str();
if (IsDirectory(FileSpec(path))) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"found Swift resource dir via "
"Xcode contents path + default toolchain "
"relative dir: %s",
resource_dir.str().c_str());
return resource_dir;
} else {
// Search the SDK for a matching cross-SDK.
path = platform_sdk_path;
llvm::sys::path::append(path, "usr/lib/swift");
StringRef resource_dir = path;
llvm::sys::path::append(path, swift_stdlib_os_dir);
if (IsDirectory(FileSpec(path))) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"found Swift resource dir via "
"Xcode contents path + cross-compilation SDK "
"relative dir: %s",
resource_dir.str().c_str());
return resource_dir;
}
}
}
}
}
// We're not in Xcode. We might be in the command-line tools.
{
llvm::SmallString<256> path(cl_tools_path);
LOG_PRINTF(LIBLLDB_LOG_TYPES, "trying command-line tools path: %s",
path.c_str());
if (!path.empty()) {
llvm::sys::path::append(path, "usr/lib/swift");
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"trying command-line tools-based lib path: %s", path.c_str());
if (IsDirectory(FileSpec(path))) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"found Swift resource dir via command-line tools "
"path + usr/lib/swift: %s",
path.c_str());
return path.str();
}
}
}
// We might be in the build-dir configuration for a
// build-script-driven LLDB build, which has the Swift build dir as
// a sibling directory to the lldb build dir. This looks much
  // different than the install-dir layout that the previous checks
// would try.
{
FileSpec faux_swift_dir_spec(swift_dir);
if (faux_swift_dir_spec) {
// We can't use a C++11 stdlib regex feature here because it doesn't
// work on Ubuntu 14.04 x86_64. Once we don't care about supporting
// that anymore, let's pull the code below back in since it is a
// simpler implementation using std::regex.
#if 0
// Let's try to regex this.
// We're looking for /some/path/lldb-{os}-{arch}, and want to
// build the following:
// /some/path/swift-{os}-{arch}/lib/swift/{os}/{arch}
// In a match, these are the following assignments for
// backrefs:
// $1 - first part of path before swift build dir
// $2 - the host OS path separator character
// $3 - all the stuff that should come after changing
// lldb to swift for the lib dir.
auto match_regex =
std::regex("^(.+([/\\\\]))lldb-(.+)$");
const std::string replace_format = "$1swift-$3";
const std::string faux_swift_dir =
faux_swift_dir_spec.GetCString();
const std::string build_tree_resource_dir =
std::regex_replace(faux_swift_dir, match_regex,
replace_format);
#else
std::string build_tree_resource_dir;
const std::string faux_swift_dir = faux_swift_dir_spec.GetCString();
// Find something that matches lldb- (particularly,
// the last one).
const std::string lldb_dash("lldb-");
auto lldb_pos = faux_swift_dir.rfind(lldb_dash);
if ((lldb_pos != std::string::npos) && (lldb_pos > 0) &&
((faux_swift_dir[lldb_pos - 1] == '\\') ||
(faux_swift_dir[lldb_pos - 1] == '/'))) {
// We found something that matches ^.+[/\\]lldb-.+$
std::ostringstream stream;
// Take everything before lldb- (the path leading up to
// the lldb dir).
stream << faux_swift_dir.substr(0, lldb_pos);
// replace lldb- with swift-.
stream << "swift-";
// and now tack on the same components from after
// the lldb- part.
stream << faux_swift_dir.substr(lldb_pos + lldb_dash.length());
const std::string build_tree_resource_dir = stream.str();
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"trying ePathTypeSwiftDir regex-based build dir: %s",
build_tree_resource_dir.c_str());
FileSpec swift_resource_dir_spec(build_tree_resource_dir.c_str());
if (IsDirectory(swift_resource_dir_spec)) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"found Swift resource dir via "
"ePathTypeSwiftDir + inferred build-tree dir: %s",
swift_resource_dir_spec.GetCString());
return swift_resource_dir_spec.GetCString();
}
}
#endif
}
}
// We failed to find a reasonable Swift resource dir.
LOG_PRINTF(LIBLLDB_LOG_TYPES, "failed to find a Swift resource dir");
return {};
}
/// This code comes from CompilerInvocation.cpp (setRuntimeResourcePath).
static void ConfigureResourceDirs(swift::CompilerInvocation &invocation,
FileSpec resource_dir, llvm::Triple triple) {
// Make sure the triple is right:
invocation.setTargetTriple(triple.str());
invocation.setRuntimeResourcePath(resource_dir.GetPath().c_str());
}
static const char *getImportFailureString(swift::serialization::Status status) {
switch (status) {
case swift::serialization::Status::Valid:
return "The module is valid.";
case swift::serialization::Status::FormatTooOld:
return "The module file format is too old to be used by this version of "
"the debugger.";
case swift::serialization::Status::FormatTooNew:
return "The module file format is too new to be used by this version of "
"the debugger.";
case swift::serialization::Status::MissingDependency:
return "The module file depends on another module that can't be loaded.";
case swift::serialization::Status::MissingShadowedModule:
return "The module file is an overlay for a Clang module, which can't be "
"found.";
case swift::serialization::Status::CircularDependency:
return "The module file depends on a module that is still being loaded, "
"i.e. there is a circular dependency.";
case swift::serialization::Status::FailedToLoadBridgingHeader:
return "The module file depends on a bridging header that can't be loaded.";
case swift::serialization::Status::Malformed:
return "The module file is malformed in some way.";
case swift::serialization::Status::MalformedDocumentation:
return "The module documentation file is malformed in some way.";
case swift::serialization::Status::NameMismatch:
return "The module file's name does not match the module it is being "
"loaded into.";
case swift::serialization::Status::TargetIncompatible:
return "The module file was built for a different target platform.";
case swift::serialization::Status::TargetTooNew:
return "The module file was built for a target newer than the current "
"target.";
default:
return "An unknown error occurred.";
}
}
/// Initialize the compiler invocation with the search paths from a
/// serialized AST.
/// \returns true on success.
static bool DeserializeCompilerFlags(swift::CompilerInvocation &invocation,
StringRef section_data_ref, StringRef name,
llvm::raw_ostream &error) {
auto result = invocation.loadFromSerializedAST(section_data_ref);
if (result == swift::serialization::Status::Valid)
return true;
error << "Could not deserialize " << name << ":\n"
<< getImportFailureString(result) << "\n";
return false;
}
static void printASTValidationError(
llvm::raw_ostream &errs,
const swift::serialization::ValidationInfo &ast_info,
const swift::serialization::ExtendedValidationInfo &ext_ast_info,
Module &module, StringRef module_buf, bool invalid_name,
bool invalid_size) {
const char *error = getImportFailureString(ast_info.status);
errs << "AST validation error";
if (!invalid_name)
errs << " in \"" << ast_info.name << '"';
errs << ": ";
// Instead of printing the generic Status::Malformed error, be specific.
if (invalid_size)
errs << "The serialized module is corrupted.";
else if (invalid_name)
errs << "The serialized module has an invalid name.";
else
errs << error;
llvm::SmallString<1> m_description;
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
LLDB_LOG(log, R"(Unable to load Swift AST for module "{0}" from library "{1}".
{2}
- targetTriple: {3}
- shortVersion: {4}
- bytes: {5} (module_buf bytes: {6})
- SDK path: {7}
- Clang Importer Options:
)",
ast_info.name, module.GetSpecificationDescription(), error,
ast_info.targetTriple, ast_info.shortVersion, ast_info.bytes,
module_buf.size(), ext_ast_info.getSDKPath());
for (StringRef ExtraOpt : ext_ast_info.getExtraClangImporterOptions())
LLDB_LOG(log, " -- {0}", ExtraOpt);
}
void SwiftASTContext::DiagnoseWarnings(Process &process, Module &module) const {
for (const std::string &message : m_module_import_warnings)
process.PrintWarningCantLoadSwiftModule(module, message);
}
/// Retrieve the serialized AST data blobs and initialize the compiler
/// invocation with the concatenated search paths from the blobs.
/// \returns true if an error was encountered.
static bool DeserializeAllCompilerFlags(SwiftASTContext &swift_ast,
Module &module,
const std::string &m_description,
llvm::raw_ostream &error,
bool &got_serialized_options) {
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
bool found_validation_errors = false;
std::string last_sdk_path;
got_serialized_options = false;
auto &invocation = swift_ast.GetCompilerInvocation();
SymbolVendor *sym_vendor = module.GetSymbolVendor();
if (!sym_vendor)
return false;
auto ast_file_datas = sym_vendor->GetASTData(eLanguageTypeSwift);
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Found %d AST file data entries.",
(int)ast_file_datas.size());
// If no N_AST symbols exist, this is not an error.
if (ast_file_datas.empty())
return false;
// An AST section consists of one or more AST modules, optionally
// with headers. Iterate over all AST modules.
for (auto ast_file_data_sp : ast_file_datas) {
llvm::StringRef buf((const char *)ast_file_data_sp->GetBytes(),
ast_file_data_sp->GetByteSize());
swift::serialization::ValidationInfo info;
for (; !buf.empty(); buf = buf.substr(info.bytes)) {
swift::serialization::ExtendedValidationInfo extended_validation_info;
info = swift::serialization::validateSerializedAST(
buf, &extended_validation_info);
bool invalid_ast = info.status != swift::serialization::Status::Valid;
bool invalid_size = (info.bytes == 0) || (info.bytes > buf.size());
bool invalid_name = info.name.empty();
if (invalid_ast || invalid_size || invalid_name) {
// Validation errors are diagnosed, but not fatal for the context.
found_validation_errors = true;
printASTValidationError(error, info, extended_validation_info, module,
buf, invalid_name, invalid_size);
// If there's a size error, quit the loop early, otherwise try the next.
if (invalid_size)
break;
continue;
}
StringRef moduleData = buf.substr(0, info.bytes);
got_serialized_options |=
DeserializeCompilerFlags(invocation, moduleData, info.name, error);
LOG_PRINTF(LIBLLDB_LOG_TYPES, "SDK path from module \"%s\" is \"%s\".",
info.name.str().c_str(),
invocation.getSDKPath().str().c_str());
if (!last_sdk_path.empty()) {
// Always let the more specific SDK path win.
if (invocation.getSDKPath() != last_sdk_path)
if (last_sdk_path.size() > invocation.getSDKPath().size())
invocation.setSDKPath(last_sdk_path);
}
last_sdk_path = invocation.getSDKPath();
}
}
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Picking SDK path \"%s\".",
invocation.getSDKPath().str().c_str());
return found_validation_errors;
}
/// Return whether this module contains any serialized Swift ASTs.
bool HasSwiftModules(Module &module) {
SymbolVendor *sym_vendor = module.GetSymbolVendor();
if (!sym_vendor)
return false;
auto ast_file_datas = sym_vendor->GetASTData(eLanguageTypeSwift);
return !ast_file_datas.empty();
}
namespace {
/// Calls arg.consume_front(<options>) and returns true on success.
/// \param prefix contains the consumed prefix.
bool ConsumeIncludeOption(StringRef &arg, StringRef &prefix) {
static StringRef options[] = {"-I",
"-F",
"-fmodule-map-file=",
"-iquote",
"-idirafter",
"-iframeworkwithsysroot",
"-iframework",
"-iprefix",
"-iwithprefixbefore",
"-iwithprefix",
"-isystemafter",
"-isystem",
"-isysroot"};
for (StringRef &option : options)
if (arg.consume_front(option)) {
prefix = option;
return true;
}
return false;
}
/// Turn relative paths in clang options into absolute paths based on
/// \c cur_working_dir.
template <typename SmallString>
void ApplyWorkingDir(SmallString &clang_argument, StringRef cur_working_dir) {
StringRef arg = clang_argument.str();
StringRef prefix;
if (ConsumeIncludeOption(arg, prefix)) {
// Ignore the option part of a double-arg include option.
if (arg.empty())
return;
} else if (arg.startswith("-")) {
// Assume this is a compiler arg and not a path starting with "-".
return;
}
// There is most probably a path in arg now.
if (!llvm::sys::path::is_relative(arg))
return;
SmallString rel_path = arg;
clang_argument = prefix;
llvm::sys::path::append(clang_argument, cur_working_dir, rel_path);
llvm::sys::path::remove_dots(clang_argument);
}
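// Illustration (not part of the original source): with cur_working_dir set to
// "/tmp/proj" (hypothetical), ApplyWorkingDir behaves roughly as follows:
//
//   "-Iinclude/foo"  -> "-I/tmp/proj/include/foo"  (relative include made absolute)
//   "-I/usr/include" -> unchanged                  (already absolute)
//   "-DFOO=1"        -> unchanged                  (a flag, not a path)
//   "headers/bar"    -> "/tmp/proj/headers/bar"    (bare relative path)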
} // namespace
void SwiftASTContext::AddExtraClangArgs(std::vector<std::string> ExtraArgs) {
llvm::SmallString<128> cur_working_dir;
llvm::SmallString<128> clang_argument;
for (const std::string &arg : ExtraArgs) {
// Join multi-arg -D and -U options for uniquing.
clang_argument += arg;
if (clang_argument == "-D" || clang_argument == "-U" ||
clang_argument == "-working-directory")
continue;
// Enable uniquing for -D and -U options.
bool is_macro = (clang_argument.size() >= 2 && clang_argument[0] == '-' &&
(clang_argument[1] == 'D' || clang_argument[1] == 'U'));
bool unique = is_macro;
// Consume any -working-directory arguments.
StringRef cwd(clang_argument);
if (cwd.consume_front("-working-directory"))
cur_working_dir = cwd;
else {
// Otherwise add the argument to the list.
if (!is_macro)
ApplyWorkingDir(clang_argument, cur_working_dir);
AddClangArgument(clang_argument.str(), unique);
}
clang_argument.clear();
}
}
void SwiftASTContext::AddUserClangArgs(TargetProperties &props) {
Args args(props.GetSwiftExtraClangFlags());
std::vector<std::string> user_clang_flags;
for (const auto &arg : args.entries())
user_clang_flags.push_back(arg.ref);
AddExtraClangArgs(user_clang_flags);
}
void SwiftASTContext::RemapClangImporterOptions(
const PathMappingList &path_map) {
auto &options = GetClangImporterOptions();
std::string remapped;
if (path_map.RemapPath(options.BridgingHeader, remapped)) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "remapped %s -> %s",
options.BridgingHeader.c_str(), remapped.c_str());
options.BridgingHeader = remapped;
}
// Previous argument was the dash-option of an option pair.
bool remap_next = false;
for (auto &arg_string : options.ExtraArgs) {
StringRef prefix;
StringRef arg = arg_string;
if (remap_next)
remap_next = false;
else if (ConsumeIncludeOption(arg, prefix)) {
if (arg.empty()) {
// Option pair.
remap_next = true;
continue;
}
// Single-arg include option with prefix.
} else {
// Not a recognized option.
continue;
}
if (path_map.RemapPath(arg, remapped)) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "remapped %s -> %s%s", arg.str().c_str(),
prefix.str().c_str(), remapped.c_str());
arg_string = prefix.str() + remapped;
}
}
}
/// Retrieve the .dSYM bundle for \p module.
static llvm::Optional<StringRef> GetDSYMBundle(Module &module) {
SymbolVendor *sym_vendor = module.GetSymbolVendor();
if (!sym_vendor)
return {};
auto sym_file = sym_vendor->GetSymbolFile();
if (!sym_file)
return {};
auto obj_file = sym_file->GetObjectFile();
if (!obj_file)
return {};
StringRef dir = obj_file->GetFileSpec().GetDirectory().GetStringRef();
auto it = llvm::sys::path::rbegin(dir);
auto end = llvm::sys::path::rend(dir);
if (it == end)
return {};
if (*it != "DWARF")
return {};
if (++it == end)
return {};
if (*it != "Resources")
return {};
if (++it == end)
return {};
if (*it != "Contents")
return {};
StringRef sep = llvm::sys::path::get_separator();
StringRef dsym = dir.take_front(it - end - sep.size());
if (llvm::sys::path::extension(dsym) != ".dSYM")
return {};
return dsym;
}
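// Illustration (not part of the original source): for a symbol file located at
// /tmp/Foo.dSYM/Contents/Resources/DWARF/Foo (hypothetical path), the backwards
// walk above matches DWARF, Resources, and Contents, and returns "/tmp/Foo.dSYM".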
lldb::TypeSystemSP SwiftASTContext::CreateInstance(lldb::LanguageType language,
Module &module,
Target *target,
bool fallback) {
std::vector<std::string> module_search_paths;
std::vector<std::pair<std::string, bool>> framework_search_paths;
if (!SwiftASTContextSupportsLanguage(language))
return lldb::TypeSystemSP();
StreamString ss;
ss << "SwiftASTContext";
if (fallback)
ss << "ForExpressions";
ss << '(' << '"';
module.GetDescription(&ss, eDescriptionLevelBrief);
ss << '"' << ')';
ss.Flush();
std::string m_description(ss.GetString().str());
ArchSpec arch = module.GetArchitecture();
ObjectFile *objfile = module.GetObjectFile();
if (!objfile)
return {};
ArchSpec object_arch = objfile->GetArchitecture();
if (!object_arch.IsValid())
return {};
lldb::CompUnitSP main_compile_unit_sp = module.GetCompileUnitAtIndex(0);
if (lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES) &&
main_compile_unit_sp &&
!FileSystem::Instance().Exists(*main_compile_unit_sp)) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"Corresponding source not found for %s, loading module "
"is unlikely to succeed",
main_compile_unit_sp->GetCString());
}
llvm::Triple triple = arch.GetTriple();
if (triple.getOS() == llvm::Triple::UnknownOS) {
// cl_kernels are the only binaries that don't have an
// LC_MIN_VERSION_xxx load command. This avoids a Swift assertion.
#if defined(__APPLE__)
switch (triple.getArch()) {
default:
triple.setOS(llvm::Triple::MacOSX);
break;
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
triple.setOS(llvm::Triple::IOS);
break;
}
#else
// Not an elegant hack on OS X, not an elegant hack elsewhere.
// But we shouldn't be claiming things are Mac binaries when they
// are not.
triple.setOS(HostInfo::GetArchitecture().GetTriple().getOS());
#endif
}
// If there is a target this may be a fallback scratch context.
assert((!fallback || target) && "fallback context must specify a target");
std::shared_ptr<SwiftASTContext> swift_ast_sp(
fallback ? (new SwiftASTContextForExpressions(m_description, *target))
: (new SwiftASTContext(
m_description,
target ? target->GetArchitecture().GetTriple() : triple,
target)));
// This is a module AST context, mark it as such.
swift_ast_sp->m_is_scratch_context = false;
swift_ast_sp->GetLanguageOptions().DebuggerSupport = true;
swift_ast_sp->GetLanguageOptions().EnableAccessControl = false;
swift_ast_sp->GetLanguageOptions().EnableTargetOSChecking = false;
if (!arch.IsValid())
return TypeSystemSP();
swift_ast_sp->SetTriple(triple, &module);
bool set_triple = false;
SymbolVendor *sym_vendor = module.GetSymbolVendor();
std::string target_triple;
if (sym_vendor) {
bool got_serialized_options;
llvm::SmallString<0> error;
llvm::raw_svector_ostream errs(error);
if (DeserializeAllCompilerFlags(*swift_ast_sp, module, m_description, errs,
got_serialized_options)) {
// Validation errors are not fatal for the context.
swift_ast_sp->m_module_import_warnings.push_back(error.str());
}
// Some of the bits in the compiler options we keep separately, so
// we need to populate them from the serialized options:
llvm::StringRef serialized_triple =
swift_ast_sp->GetCompilerInvocation().getTargetTriple();
if (serialized_triple.empty()) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Serialized triple was empty.");
} else {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Found serialized triple %s.",
serialized_triple.str().c_str());
swift_ast_sp->SetTriple(llvm::Triple(serialized_triple), &module);
set_triple = true;
}
llvm::StringRef serialized_sdk_path =
swift_ast_sp->GetCompilerInvocation().getSDKPath();
if (serialized_sdk_path.empty()) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "No serialized SDK path.");
} else {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Got serialized SDK path %s.",
serialized_sdk_path.data());
FileSpec sdk_spec(serialized_sdk_path.data());
if (FileSystem::Instance().Exists(sdk_spec)) {
swift_ast_sp->SetPlatformSDKPath(serialized_sdk_path);
}
}
if (!got_serialized_options || swift_ast_sp->GetPlatformSDKPath().empty()) {
std::string platform_sdk_path;
if (sym_vendor->GetCompileOption("-sdk", platform_sdk_path)) {
FileSpec sdk_spec(platform_sdk_path.c_str());
if (FileSystem::Instance().Exists(sdk_spec)) {
swift_ast_sp->SetPlatformSDKPath(platform_sdk_path);
}
if (sym_vendor->GetCompileOption("-target", target_triple)) {
swift_ast_sp->SetTriple(llvm::Triple(target_triple), &module);
set_triple = true;
}
}
}
if (!got_serialized_options) {
std::vector<std::string> fw_paths;
if (sym_vendor->GetCompileOptions("-F", fw_paths))
for (std::string &fw_path : fw_paths)
framework_search_paths.push_back({fw_path, /*is_system*/ false});
std::vector<std::string> include_paths;
if (sym_vendor->GetCompileOptions("-I", include_paths)) {
for (std::string &search_path : include_paths) {
const FileSpec path_spec(search_path.c_str());
if (FileSystem::Instance().Exists(path_spec)) {
static const ConstString s_hmap_extension("hmap");
if (IsDirectory(path_spec)) {
module_search_paths.push_back(search_path);
} else if (IsRegularFile(path_spec) &&
path_spec.GetFileNameExtension() == s_hmap_extension) {
std::string argument("-I");
argument.append(search_path);
swift_ast_sp->AddClangArgument(argument.c_str());
}
}
}
}
std::vector<std::string> cc_options;
if (sym_vendor->GetCompileOptions("-Xcc", cc_options)) {
for (size_t i = 0; i < cc_options.size(); ++i) {
if (!cc_options[i].compare("-iquote") && i + 1 < cc_options.size()) {
swift_ast_sp->AddClangArgumentPair("-iquote", cc_options[i + 1]);
}
}
}
}
}
if (!set_triple) {
llvm::Triple llvm_triple = swift_ast_sp->GetTriple();
// LLVM wants this to be set to iOS or MacOSX; if we're working on
// a bare-boards type image, change the triple for LLVM's benefit.
if (llvm_triple.getVendor() == llvm::Triple::Apple &&
llvm_triple.getOS() == llvm::Triple::UnknownOS) {
if (llvm_triple.getArch() == llvm::Triple::arm ||
llvm_triple.getArch() == llvm::Triple::thumb) {
llvm_triple.setOS(llvm::Triple::IOS);
} else {
llvm_triple.setOS(llvm::Triple::MacOSX);
}
swift_ast_sp->SetTriple(llvm_triple, &module);
}
}
StringRef resource_dir = swift_ast_sp->GetResourceDir(triple);
ConfigureResourceDirs(swift_ast_sp->GetCompilerInvocation(),
FileSpec(resource_dir), triple);
// Apply the working directory to all relative paths.
std::vector<std::string> DeserializedArgs = swift_ast_sp->GetClangArguments();
swift_ast_sp->GetClangImporterOptions().ExtraArgs.clear();
swift_ast_sp->AddExtraClangArgs(DeserializedArgs);
if (target)
swift_ast_sp->AddUserClangArgs(*target);
else if (auto &global_target_properties = Target::GetGlobalProperties())
swift_ast_sp->AddUserClangArgs(*global_target_properties);
// Apply source path remappings found in the module's dSYM.
swift_ast_sp->RemapClangImporterOptions(module.GetSourceMappingList());
// Add Swift interfaces in the .dSYM at the end of the search paths.
// .swiftmodules win over .swiftinterfaces, when they are loaded
// directly from the .swift_ast section.
//
// FIXME: Since these paths also end up in the scratch context, we
  //        would need a mechanism to ensure that newer versions
// (in the library evolution sense, not the date on disk) win
// over older versions of the same .swiftinterface.
if (auto dsym = GetDSYMBundle(module)) {
llvm::SmallString<256> path(*dsym);
llvm::Triple triple(swift_ast_sp->GetTriple());
StringRef arch = llvm::Triple::getArchTypeName(triple.getArch());
llvm::sys::path::append(path, "Contents", "Resources", "Swift", arch);
bool exists = false;
llvm::sys::fs::is_directory(path, exists);
if (exists)
module_search_paths.push_back(path.str());
}
swift_ast_sp->InitializeSearchPathOptions(module_search_paths,
framework_search_paths);
if (!swift_ast_sp->GetClangImporter()) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"(\"%s\") returning NULL - couldn't create a ClangImporter",
module.GetFileSpec().GetFilename().AsCString("<anonymous>"));
return {};
}
std::vector<std::string> module_names;
swift_ast_sp->RegisterSectionModules(module, module_names);
swift_ast_sp->ValidateSectionModules(module, module_names);
if (lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES)) {
std::lock_guard<std::recursive_mutex> locker(g_log_mutex);
LOG_PRINTF(LIBLLDB_LOG_TYPES, "((Module*)%p, \"%s\") = %p",
static_cast<void *>(&module),
module.GetFileSpec().GetFilename().AsCString("<anonymous>"),
static_cast<void *>(swift_ast_sp.get()));
swift_ast_sp->LogConfiguration();
}
return swift_ast_sp;
}
lldb::TypeSystemSP SwiftASTContext::CreateInstance(lldb::LanguageType language,
Target &target,
const char *extra_options) {
if (!SwiftASTContextSupportsLanguage(language))
return lldb::TypeSystemSP();
std::string m_description = "SwiftASTContextForExpressions";
std::vector<std::string> module_search_paths;
std::vector<std::pair<std::string, bool>> framework_search_paths;
// Make an AST but don't set the triple yet. We need to try and
  // detect if we have an iOS simulator.
std::shared_ptr<SwiftASTContextForExpressions> swift_ast_sp(
new SwiftASTContextForExpressions(m_description, target));
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(Target)");
auto logError = [&](const char *message) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Failed to create scratch context - %s",
message);
// Avoid spamming the user with errors.
if (!target.UseScratchTypesystemPerModule()) {
StreamSP errs_sp = target.GetDebugger().GetAsyncErrorStream();
errs_sp->Printf("Cannot create Swift scratch context (%s)", message);
}
};
ArchSpec arch = target.GetArchitecture();
if (!arch.IsValid()) {
logError("invalid target architecture");
return TypeSystemSP();
}
// This is a scratch AST context, mark it as such.
swift_ast_sp->m_is_scratch_context = true;
swift_ast_sp->GetLanguageOptions().EnableTargetOSChecking = false;
bool handled_sdk_path = false;
const size_t num_images = target.GetImages().GetSize();
// Set the SDK path prior to doing search paths. Otherwise when we
// create search path options we put in the wrong SDK path.
FileSpec &target_sdk_spec = target.GetSDKPath();
if (target_sdk_spec && FileSystem::Instance().Exists(target_sdk_spec)) {
swift_ast_sp->SetPlatformSDKPath(target_sdk_spec.GetPath());
handled_sdk_path = true;
}
if (target.GetSwiftCreateModuleContextsInParallel()) {
// The first call to GetTypeSystemForLanguage() on a module will
// trigger the import (and thus most likely the rebuild) of all
// the Clang modules that were imported in this module. This can
// be a lot of work (potentially ten seconds per module), but it
// can be performed in parallel.
llvm::ThreadPool pool;
for (size_t mi = 0; mi != num_images; ++mi) {
auto module_sp = target.GetImages().GetModuleAtIndex(mi);
pool.async([=] {
module_sp->GetTypeSystemForLanguage(lldb::eLanguageTypeSwift);
});
}
pool.wait();
}
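  // Walk the images looking for one that can donate an SDK path to this
  // context, warning once per module whose Swift AST could not be loaded.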
Status module_error;
for (size_t mi = 0; mi != num_images; ++mi) {
ModuleSP module_sp = target.GetImages().GetModuleAtIndex(mi);
// Skip images without a serialized Swift AST. This avoids
// spurious warning messages.
if (!HasSwiftModules(*module_sp))
continue;
SwiftASTContext *module_swift_ast = llvm::dyn_cast_or_null<SwiftASTContext>(
module_sp->GetTypeSystemForLanguage(lldb::eLanguageTypeSwift));
if (!module_swift_ast || module_swift_ast->HasFatalErrors() ||
!module_swift_ast->GetClangImporter()) {
// Make sure we warn about this module load failure, the one
// that comes from loading types often gets swallowed up and not
// seen, this is the only reliable point where we can show this.
// But only do it once per UUID so we don't overwhelm the user
// with warnings.
UUID module_uuid(module_sp->GetUUID());
bool unique_message =
target.RegisterSwiftContextMessageKey(module_uuid.GetAsString());
if (unique_message) {
StreamString ss;
module_sp->GetDescription(&ss, eDescriptionLevelBrief);
if (module_swift_ast && module_swift_ast->HasFatalErrors())
ss << ": "
<< module_swift_ast->GetFatalErrors().AsCString("unknown error");
target.GetDebugger().GetErrorFile()->Printf(
"Error while loading Swift module:\n%s\n"
"Debug info from this module will be unavailable in the "
"debugger.\n\n",
ss.GetData());
}
continue;
}
if (!handled_sdk_path) {
StringRef platform_sdk_path = module_swift_ast->GetPlatformSDKPath();
if (!platform_sdk_path.empty()) {
handled_sdk_path = true;
swift_ast_sp->SetPlatformSDKPath(platform_sdk_path);
}
}
if (handled_sdk_path)
break;
}
// First, prime the compiler with the options from the main executable:
bool got_serialized_options = false;
ModuleSP exe_module_sp(target.GetExecutableModule());
// If we're debugging a testsuite, then treat the main test bundle
// as the executable.
if (exe_module_sp && PlatformDarwin::IsUnitTestExecutable(*exe_module_sp)) {
ModuleSP unit_test_module =
PlatformDarwin::GetUnitTestModule(target.GetImages());
if (unit_test_module) {
exe_module_sp = unit_test_module;
}
}
// Attempt to deserialize the compiler flags from the AST.
if (exe_module_sp) {
llvm::SmallString<0> error;
llvm::raw_svector_ostream errs(error);
if (DeserializeAllCompilerFlags(*swift_ast_sp, *exe_module_sp,
m_description, errs,
got_serialized_options)) {
if (Process *process = target.GetProcessSP().get())
process->PrintWarningCantLoadSwiftModule(*exe_module_sp, error.c_str());
LOG_PRINTF(
LIBLLDB_LOG_TYPES,
"Attempt to load compiler options from serialized AST failed: %s",
error.c_str());
}
}
// Now if the user fully specified the triple, let that override the one
  // we got from the executable's options:
if (target.GetArchitecture().IsFullySpecifiedTriple()) {
swift_ast_sp->SetTriple(target.GetArchitecture().GetTriple());
} else {
// Always run using the Host OS triple...
bool set_triple = false;
PlatformSP platform_sp(target.GetPlatform());
llvm::Triple target_triple = target.GetArchitecture().GetTriple();
if (platform_sp && !target_triple.hasEnvironment()) {
llvm::VersionTuple version =
platform_sp->GetOSVersion(target.GetProcessSP().get());
std::string buffer;
llvm::raw_string_ostream(buffer)
<< target_triple.getArchName() << '-' << target_triple.getVendorName()
<< '-' << llvm::Triple::getOSTypeName(target_triple.getOS())
<< version.getAsString();
swift_ast_sp->SetTriple(llvm::Triple(buffer));
set_triple = true;
}
if (!set_triple)
if (ModuleSP exe_module_sp = target.GetExecutableModule()) {
auto *exe_swift_ctx = llvm::dyn_cast_or_null<SwiftASTContext>(
exe_module_sp->GetTypeSystemForLanguage(lldb::eLanguageTypeSwift));
if (exe_swift_ctx)
swift_ast_sp->SetTriple(exe_swift_ctx->GetLanguageOptions().Target);
}
}
llvm::Triple triple(swift_ast_sp->GetTriple());
StringRef resource_dir = swift_ast_sp->GetResourceDir(triple);
ConfigureResourceDirs(swift_ast_sp->GetCompilerInvocation(),
FileSpec(resource_dir), triple);
const bool use_all_compiler_flags =
!got_serialized_options || target.GetUseAllCompilerFlags();
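  // Gather framework search paths, module search paths, and extra Clang
  // arguments from each image in the target.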
std::function<void(ModuleSP &&)> process_one_module =
[&](ModuleSP &&module_sp) {
const FileSpec &module_file = module_sp->GetFileSpec();
std::string module_path = module_file.GetPath();
// Add the containing framework to the framework search path.
// Don't do that if this is the executable module, since it
// might be buried in some framework that we don't care about.
if (use_all_compiler_flags &&
target.GetExecutableModulePointer() != module_sp.get()) {
size_t framework_offset = module_path.rfind(".framework/");
if (framework_offset != std::string::npos) {
// Sometimes the version of the framework that got loaded has been
// stripped and in that case, adding it to the framework search
// path will just short-cut a clang search that might otherwise
// find the needed headers. So don't add these paths.
std::string framework_path =
module_path.substr(0, framework_offset);
framework_path.append(".framework");
FileSpec path_spec(framework_path);
FileSystem::Instance().Resolve(path_spec);
FileSpec headers_spec =
path_spec.CopyByAppendingPathComponent("Headers");
bool add_it = false;
if (FileSystem::Instance().Exists(headers_spec))
add_it = true;
if (!add_it) {
FileSpec module_spec =
path_spec.CopyByAppendingPathComponent("Modules");
if (FileSystem::Instance().Exists(module_spec))
add_it = true;
}
if (!add_it) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"process_one_module rejecting framework path \"%s\" "
"as it has no Headers or Modules subdirectories.",
framework_path.c_str());
}
if (add_it) {
while (framework_offset && (module_path[framework_offset] != '/'))
framework_offset--;
if (module_path[framework_offset] == '/') {
// framework_offset now points to the '/';
std::string parent_path =
module_path.substr(0, framework_offset);
if (!StringRef(parent_path).equals("/System/Library") &&
!IsDeviceSupport(parent_path.c_str()))
framework_search_paths.push_back(
{std::move(parent_path), /*system*/ false});
}
}
}
}
// Skip images without a serialized Swift AST.
if (!HasSwiftModules(*module_sp))
return;
SymbolVendor *sym_vendor = module_sp->GetSymbolVendor();
if (!sym_vendor)
return;
SymbolFile *sym_file = sym_vendor->GetSymbolFile();
if (!sym_file)
return;
Status sym_file_error;
SwiftASTContext *ast_context = llvm::dyn_cast_or_null<SwiftASTContext>(
sym_file->GetTypeSystemForLanguage(lldb::eLanguageTypeSwift));
if (ast_context && !ast_context->HasErrors()) {
if (use_all_compiler_flags ||
target.GetExecutableModulePointer() == module_sp.get()) {
const auto &opts = ast_context->GetSearchPathOptions();
module_search_paths.insert(module_search_paths.end(),
opts.ImportSearchPaths.begin(),
opts.ImportSearchPaths.end());
for (const auto &fwsp : opts.FrameworkSearchPaths)
framework_search_paths.push_back({fwsp.Path, fwsp.IsSystem});
swift_ast_sp->AddExtraClangArgs(ast_context->GetClangArguments());
}
}
};
for (size_t mi = 0; mi != num_images; ++mi) {
process_one_module(target.GetImages().GetModuleAtIndex(mi));
}
FileSpecList target_module_paths = target.GetSwiftModuleSearchPaths();
for (size_t mi = 0, me = target_module_paths.GetSize(); mi != me; ++mi)
module_search_paths.push_back(
target_module_paths.GetFileSpecAtIndex(mi).GetPath());
FileSpecList target_framework_paths = target.GetSwiftFrameworkSearchPaths();
for (size_t fi = 0, fe = target_framework_paths.GetSize(); fi != fe; ++fi)
framework_search_paths.push_back(
{target_framework_paths.GetFileSpecAtIndex(fi).GetPath(),
/*is_system*/ false});
// Now fold any extra options we were passed. This has to be done
// BEFORE the ClangImporter is made by calling GetClangImporter or
// these options will be ignored.
swift_ast_sp->AddUserClangArgs(target);
if (extra_options) {
swift::CompilerInvocation &compiler_invocation =
swift_ast_sp->GetCompilerInvocation();
Args extra_args(extra_options);
llvm::ArrayRef<const char *> extra_args_ref(extra_args.GetArgumentVector(),
extra_args.GetArgumentCount());
compiler_invocation.parseArgs(extra_args_ref,
swift_ast_sp->GetDiagnosticEngine());
}
// Apply source path remappings found in the target settings.
swift_ast_sp->RemapClangImporterOptions(target.GetSourcePathMap());
// This needs to happen once all the import paths are set, or
// otherwise no modules will be found.
swift_ast_sp->InitializeSearchPathOptions(module_search_paths,
framework_search_paths);
if (!swift_ast_sp->GetClangImporter()) {
logError("couldn't create a ClangImporter");
return TypeSystemSP();
}
for (size_t mi = 0; mi != num_images; ++mi) {
std::vector<std::string> module_names;
auto module_sp = target.GetImages().GetModuleAtIndex(mi);
swift_ast_sp->RegisterSectionModules(*module_sp, module_names);
}
LOG_PRINTF(LIBLLDB_LOG_TYPES, "((Target*)%p) = %p",
static_cast<void *>(&target),
static_cast<void *>(swift_ast_sp.get()));
swift_ast_sp->LogConfiguration();
if (swift_ast_sp->HasFatalErrors()) {
logError(swift_ast_sp->GetFatalErrors().AsCString());
return {};
}
const bool can_create = true;
if (!swift_ast_sp->m_ast_context_ap->getStdlibModule(can_create)) {
logError("couldn't load the Swift stdlib");
return {};
}
return swift_ast_sp;
}
void SwiftASTContext::EnumerateSupportedLanguages(
std::set<lldb::LanguageType> &languages_for_types,
std::set<lldb::LanguageType> &languages_for_expressions) {
static std::vector<lldb::LanguageType> s_supported_languages_for_types(
{lldb::eLanguageTypeSwift});
static std::vector<lldb::LanguageType> s_supported_languages_for_expressions(
{lldb::eLanguageTypeSwift});
languages_for_types.insert(s_supported_languages_for_types.begin(),
s_supported_languages_for_types.end());
languages_for_expressions.insert(
s_supported_languages_for_expressions.begin(),
s_supported_languages_for_expressions.end());
}
static lldb::TypeSystemSP CreateTypeSystemInstance(lldb::LanguageType language,
Module *module,
Target *target,
const char *extra_options) {
// This should be called with either a target or a module.
if (module) {
assert(!target);
    assert(!extra_options || StringRef(extra_options).empty());
return SwiftASTContext::CreateInstance(language, *module);
} else if (target) {
assert(!module);
return SwiftASTContext::CreateInstance(language, *target, extra_options);
}
llvm_unreachable("Neither type nor module given to CreateTypeSystemInstance");
}
void SwiftASTContext::Initialize() {
PluginManager::RegisterPlugin(
GetPluginNameStatic(), "swift AST context plug-in",
CreateTypeSystemInstance, EnumerateSupportedLanguages);
}
void SwiftASTContext::Terminate() {
PluginManager::UnregisterPlugin(CreateTypeSystemInstance);
}
bool SwiftASTContext::SupportsLanguage(lldb::LanguageType language) {
return SwiftASTContextSupportsLanguage(language);
}
Status SwiftASTContext::IsCompatible() { return GetFatalErrors(); }
Status SwiftASTContext::GetFatalErrors() {
Status error;
if (HasFatalErrors()) {
error = m_fatal_errors;
if (error.Success()) {
// Retrieve the error message from the DiagnosticConsumer.
DiagnosticManager diagnostic_manager;
PrintDiagnostics(diagnostic_manager);
error.SetErrorString(diagnostic_manager.GetString());
}
}
return error;
}
swift::IRGenOptions &SwiftASTContext::GetIRGenOptions() {
return m_compiler_invocation_ap->getIRGenOptions();
}
llvm::Triple SwiftASTContext::GetTriple() const {
VALID_OR_RETURN(llvm::Triple());
return llvm::Triple(m_compiler_invocation_ap->getTargetTriple());
}
/// Conditions a triple string to be safe for use with Swift. Right
/// now this just strips the Haswell marker off the CPU name.
///
/// TODO: Make Swift more robust.
static std::string GetSwiftFriendlyTriple(StringRef triple) {
if (triple.consume_front("x86_64h"))
return std::string("x86_64") + triple.str();
return triple.str();
}
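// For example, "x86_64h-apple-macosx10.14" becomes "x86_64-apple-macosx10.14";
// all other triples are returned unchanged.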
bool SwiftASTContext::SetTriple(const llvm::Triple triple, Module *module) {
VALID_OR_RETURN(false);
if (triple.str().empty())
return false;
// The triple may change up until a swift::irgen::IRGenModule is created.
if (m_ir_gen_module_ap.get()) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"(\"%s\") ignoring triple "
"since the IRGenModule has already been created",
triple.str().c_str());
return false;
}
const unsigned unspecified = 0;
std::string adjusted_triple = GetSwiftFriendlyTriple(triple.str());
  // If the OS version is unspecified, canonicalize "darwin" to "macosx"
  // and append the minimum OS version recorded in the module's (or the
  // target executable's) object file.
if (triple.getOSMajorVersion() == unspecified) {
// If a triple is "<arch>-apple-darwin" change it to be
// "<arch>-apple-macosx" otherwise the major and minor OS
// version we append below would be wrong.
if (triple.getVendor() == llvm::Triple::VendorType::Apple &&
triple.getOS() == llvm::Triple::OSType::Darwin) {
llvm::Triple mac_triple(adjusted_triple);
mac_triple.setOS(llvm::Triple::OSType::MacOSX);
adjusted_triple = mac_triple.str();
}
// Append the min OS to the triple if we have a target
ModuleSP module_sp;
if (!module) {
TargetSP target_sp(m_target_wp.lock());
if (target_sp) {
module_sp = target_sp->GetExecutableModule();
if (module_sp)
module = module_sp.get();
}
}
if (module) {
if (ObjectFile *objfile = module->GetObjectFile())
if (llvm::VersionTuple version = objfile->GetMinimumOSVersion()) {
llvm::Triple vers_triple(adjusted_triple);
vers_triple.setOSName(vers_triple.getOSName().str() +
version.getAsString());
adjusted_triple = vers_triple.str();
}
}
}
  if (triple.getOS() == llvm::Triple::UnknownOS) {
// This case triggers an llvm_unreachable() in the Swift compiler.
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Cannot initialize Swift with an unknown OS");
return false;
}
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\") setting to \"%s\"",
triple.str().c_str(), adjusted_triple.c_str());
llvm::Triple adjusted_llvm_triple(adjusted_triple);
m_compiler_invocation_ap->setTargetTriple(adjusted_llvm_triple);
assert(GetTriple() == adjusted_llvm_triple);
assert(!m_ast_context_ap ||
(llvm::Triple(m_ast_context_ap->LangOpts.Target.getTriple()) ==
adjusted_llvm_triple));
// Every time the triple is changed the LangOpts must be updated
// too, because Swift default-initializes the EnableObjCInterop
// flag based on the triple.
GetLanguageOptions().EnableObjCInterop = triple.isOSDarwin();
return true;
}
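// For example, given a module whose object file reports a minimum OS version
// of 10.14, the triple "x86_64-apple-darwin" is adjusted to
// "x86_64-apple-macosx10.14" before it is handed to the compiler invocation.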
namespace {
struct SDKEnumeratorInfo {
FileSpec found_path;
SDKType sdk_type;
uint32_t least_major;
uint32_t least_minor;
};
} // anonymous namespace
static bool SDKSupportsSwift(const FileSpec &sdk_path, SDKType desired_type) {
ConstString last_path_component = sdk_path.GetLastPathComponent();
if (last_path_component) {
const llvm::StringRef sdk_name_raw = last_path_component.GetStringRef();
std::string sdk_name_lower = sdk_name_raw.lower();
const llvm::StringRef sdk_name(sdk_name_lower);
llvm::StringRef version_part;
SDKType sdk_type = SDKType::unknown;
if (desired_type == SDKType::unknown) {
for (int i = (int)SDKType::MacOSX; i < SDKType::numSDKTypes; ++i) {
if (sdk_name.startswith(sdk_strings[i])) {
version_part = sdk_name.drop_front(strlen(sdk_strings[i]));
sdk_type = (SDKType)i;
break;
}
}
// For non-Darwin SDKs assume Swift is supported
if (sdk_type == SDKType::unknown)
return true;
} else {
if (sdk_name.startswith(sdk_strings[desired_type])) {
version_part = sdk_name.drop_front(strlen(sdk_strings[desired_type]));
sdk_type = desired_type;
} else {
return false;
}
}
const size_t major_dot_offset = version_part.find('.');
if (major_dot_offset == llvm::StringRef::npos)
return false;
const llvm::StringRef major_version =
version_part.slice(0, major_dot_offset);
const llvm::StringRef minor_part =
version_part.drop_front(major_dot_offset + 1);
const size_t minor_dot_offset = minor_part.find('.');
if (minor_dot_offset == llvm::StringRef::npos)
return false;
const llvm::StringRef minor_version = minor_part.slice(0, minor_dot_offset);
unsigned int major = 0;
unsigned int minor = 0;
if (major_version.getAsInteger(10, major))
return false;
if (minor_version.getAsInteger(10, minor))
return false;
switch (sdk_type) {
case SDKType::MacOSX:
if (major > 10 || (major == 10 && minor >= 10))
return true;
break;
case SDKType::iPhoneOS:
case SDKType::iPhoneSimulator:
if (major >= 8)
return true;
break;
case SDKType::AppleTVSimulator:
case SDKType::AppleTVOS:
if (major >= 9)
return true;
break;
case SDKType::WatchSimulator:
case SDKType::watchOS:
if (major >= 2)
return true;
break;
case SDKType::Linux:
return true;
default:
return false;
}
}
return false;
}
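// For example, assuming sdk_strings holds the lowercased platform prefixes
// (such as "macosx"), an SDK named "MacOSX10.14.sdk" parses as major 10 and
// minor 14 and is accepted, whereas "iPhoneOS7.1.sdk" is rejected because
// Swift requires iOS 8 or later.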
FileSystem::EnumerateDirectoryResult
DirectoryEnumerator(void *baton, llvm::sys::fs::file_type file_type,
StringRef path) {
SDKEnumeratorInfo *enumerator_info = static_cast<SDKEnumeratorInfo *>(baton);
const FileSpec spec(path);
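  // Note that enumeration does not stop at the first match; the last
  // matching SDK in directory order ends up in found_path.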
if (SDKSupportsSwift(spec, enumerator_info->sdk_type)) {
enumerator_info->found_path = spec;
return FileSystem::EnumerateDirectoryResult::eEnumerateDirectoryResultNext;
}
return FileSystem::EnumerateDirectoryResult::eEnumerateDirectoryResultNext;
}
static ConstString EnumerateSDKsForVersion(FileSpec sdks_spec, SDKType sdk_type,
uint32_t least_major,
uint32_t least_minor) {
if (!IsDirectory(sdks_spec))
return ConstString();
const bool find_directories = true;
const bool find_files = false;
const bool find_other = true; ///< Include symlinks.
SDKEnumeratorInfo enumerator_info;
enumerator_info.sdk_type = sdk_type;
enumerator_info.least_major = least_major;
enumerator_info.least_minor = least_minor;
FileSystem::Instance().EnumerateDirectory(
sdks_spec.GetPath().c_str(), find_directories, find_files, find_other,
DirectoryEnumerator, &enumerator_info);
if (IsDirectory(enumerator_info.found_path))
return ConstString(enumerator_info.found_path.GetPath());
else
return ConstString();
}
static ConstString GetSDKDirectory(SDKType sdk_type, uint32_t least_major,
uint32_t least_minor) {
if (sdk_type != SDKType::MacOSX) {
// Look inside Xcode for the required installed iOS SDK version.
std::string sdks_path = GetXcodeContentsPath();
sdks_path.append("Developer/Platforms");
if (sdk_type == SDKType::iPhoneSimulator) {
sdks_path.append("/iPhoneSimulator.platform/");
} else if (sdk_type == SDKType::AppleTVSimulator) {
sdks_path.append("/AppleTVSimulator.platform/");
} else if (sdk_type == SDKType::AppleTVOS) {
sdks_path.append("/AppleTVOS.platform/");
} else if (sdk_type == SDKType::WatchSimulator) {
sdks_path.append("/WatchSimulator.platform/");
} else if (sdk_type == SDKType::watchOS) {
// For now, we need to be prepared to handle either capitalization of this
// path.
std::string WatchOS_candidate_path = sdks_path + "/WatchOS.platform/";
if (IsDirectory(FileSpec(WatchOS_candidate_path.c_str()))) {
sdks_path = WatchOS_candidate_path;
} else {
std::string watchOS_candidate_path = sdks_path + "/watchOS.platform/";
if (IsDirectory(FileSpec(watchOS_candidate_path.c_str()))) {
sdks_path = watchOS_candidate_path;
} else {
return ConstString();
}
}
} else {
sdks_path.append("/iPhoneOS.platform/");
}
sdks_path.append("Developer/SDKs/");
FileSpec sdks_spec(sdks_path.c_str());
return EnumerateSDKsForVersion(sdks_spec, sdk_type, least_major,
                                   least_minor);
}
// The SDK type is macOS.
llvm::VersionTuple version = HostInfo::GetOSVersion();
if (!version)
return ConstString();
uint32_t major = version.getMajor();
uint32_t minor = version.getMinor().getValueOr(0);
// If there are minimum requirements that exceed the current OS,
// apply those.
if (least_major > major) {
major = least_major;
minor = least_minor;
} else if (least_major == major) {
if (least_minor > minor)
minor = least_minor;
}
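  // Cache the result per (major, minor) version; the key packs both
  // components into a single 64-bit value.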
typedef std::map<uint64_t, ConstString> SDKDirectoryCache;
static std::mutex g_mutex;
static SDKDirectoryCache g_sdk_cache;
std::lock_guard<std::mutex> locker(g_mutex);
const uint64_t major_minor = (uint64_t)major << 32 | (uint64_t)minor;
SDKDirectoryCache::iterator pos = g_sdk_cache.find(major_minor);
if (pos != g_sdk_cache.end())
return pos->second;
FileSpec fspec;
  std::string xcode_contents_path = GetXcodeContentsPath();
if (!xcode_contents_path.empty()) {
StreamString sdk_path;
sdk_path.Printf(
"%sDeveloper/Platforms/MacOSX.platform/Developer/SDKs/MacOSX%u.%u.sdk",
xcode_contents_path.c_str(), major, minor);
fspec.SetFile(sdk_path.GetString(), FileSpec::Style::native);
if (FileSystem::Instance().Exists(fspec)) {
ConstString path(sdk_path.GetString());
// Cache results.
g_sdk_cache[major_minor] = path;
return path;
} else if ((least_major != major) || (least_minor != minor)) {
// Try the required SDK.
sdk_path.Clear();
sdk_path.Printf("%sDeveloper/Platforms/MacOSX.platform/Developer/SDKs/"
"MacOSX%u.%u.sdk",
xcode_contents_path.c_str(), least_major, least_minor);
fspec.SetFile(sdk_path.GetString(), FileSpec::Style::native);
if (FileSystem::Instance().Exists(fspec)) {
ConstString path(sdk_path.GetString());
// Cache results.
g_sdk_cache[major_minor] = path;
return path;
} else {
// Okay, we're going to do an exhaustive search for *any* SDK
// that has an adequate version.
std::string sdks_path = xcode_contents_path;
sdks_path.append("Developer/Platforms/MacOSX.platform/Developer/SDKs");
FileSpec sdks_spec(sdks_path.c_str());
ConstString sdk_path = EnumerateSDKsForVersion(
          sdks_spec, sdk_type, least_major, least_minor);
if (sdk_path) {
g_sdk_cache[major_minor] = sdk_path;
return sdk_path;
}
}
}
}
// Cache results.
g_sdk_cache[major_minor] = ConstString();
return ConstString();
}
swift::CompilerInvocation &SwiftASTContext::GetCompilerInvocation() {
return *m_compiler_invocation_ap;
}
swift::SourceManager &SwiftASTContext::GetSourceManager() {
if (m_source_manager_ap.get() == NULL)
m_source_manager_ap.reset(new swift::SourceManager());
return *m_source_manager_ap;
}
swift::LangOptions &SwiftASTContext::GetLanguageOptions() {
return GetCompilerInvocation().getLangOptions();
}
swift::DiagnosticEngine &SwiftASTContext::GetDiagnosticEngine() {
if (!m_diagnostic_engine_ap) {
m_diagnostic_engine_ap.reset(
new swift::DiagnosticEngine(GetSourceManager()));
// The following diagnostics are fatal, but they are diagnosed at
// a very early point where the AST isn't yet destroyed beyond repair.
m_diagnostic_engine_ap->ignoreDiagnostic(
swift::diag::serialization_module_too_old.ID);
m_diagnostic_engine_ap->ignoreDiagnostic(
swift::diag::serialization_module_too_new.ID);
m_diagnostic_engine_ap->ignoreDiagnostic(
swift::diag::serialization_module_language_version_mismatch.ID);
}
return *m_diagnostic_engine_ap;
}
swift::SILOptions &SwiftASTContext::GetSILOptions() {
return GetCompilerInvocation().getSILOptions();
}
bool SwiftASTContext::TargetHasNoSDK() {
llvm::Triple triple(GetTriple());
switch (triple.getOS()) {
case llvm::Triple::OSType::MacOSX:
case llvm::Triple::OSType::Darwin:
case llvm::Triple::OSType::IOS:
return false;
default:
return true;
}
}
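// Any non-Darwin OS (Linux, for instance) is treated as having no SDK here.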
swift::ClangImporterOptions &SwiftASTContext::GetClangImporterOptions() {
swift::ClangImporterOptions &clang_importer_options =
GetCompilerInvocation().getClangImporterOptions();
if (!m_initialized_clang_importer_options) {
m_initialized_clang_importer_options = true;
// Set the Clang module search path.
llvm::SmallString<128> path;
auto props = ModuleList::GetGlobalModuleListProperties();
props.GetClangModulesCachePath().GetPath(path);
clang_importer_options.ModuleCachePath = path.str();
FileSpec clang_dir_spec;
clang_dir_spec = GetClangResourceDir();
if (FileSystem::Instance().Exists(clang_dir_spec))
clang_importer_options.OverrideResourceDir = clang_dir_spec.GetPath();
clang_importer_options.DebuggerSupport = true;
}
return clang_importer_options;
}
swift::SearchPathOptions &SwiftASTContext::GetSearchPathOptions() {
assert(m_initialized_search_path_options);
return GetCompilerInvocation().getSearchPathOptions();
}
void SwiftASTContext::InitializeSearchPathOptions(
llvm::ArrayRef<std::string> module_search_paths,
llvm::ArrayRef<std::pair<std::string, bool>> framework_search_paths) {
swift::SearchPathOptions &search_path_opts =
GetCompilerInvocation().getSearchPathOptions();
assert(!m_initialized_search_path_options);
m_initialized_search_path_options = true;
bool set_sdk = false;
if (!search_path_opts.SDKPath.empty()) {
FileSpec provided_sdk_path(search_path_opts.SDKPath);
if (FileSystem::Instance().Exists(provided_sdk_path)) {
// We don't check whether the SDK supports swift because we figure if
// someone is passing this to us on the command line (e.g., for the
// REPL), they probably know what they're doing.
set_sdk = true;
}
} else if (!m_platform_sdk_path.empty()) {
FileSpec platform_sdk(m_platform_sdk_path.c_str());
if (FileSystem::Instance().Exists(platform_sdk) &&
SDKSupportsSwift(platform_sdk, SDKType::unknown)) {
search_path_opts.SDKPath = m_platform_sdk_path.c_str();
set_sdk = true;
}
}
llvm::Triple triple(GetTriple());
StringRef resource_dir = GetResourceDir(triple);
ConfigureResourceDirs(GetCompilerInvocation(), FileSpec(resource_dir),
triple);
auto is_simulator = [&]() -> bool {
return triple.getEnvironment() == llvm::Triple::Simulator ||
!triple.getArchName().startswith("arm");
};
if (!set_sdk) {
auto sdk = GetSDKType(triple, HostInfo::GetArchitecture().GetTriple());
// Explicitly leave the SDKPath blank on other platforms.
if (sdk.sdk_type != SDKType::unknown) {
auto dir = GetSDKDirectory(sdk.sdk_type, sdk.min_version_major,
sdk.min_version_minor);
search_path_opts.SDKPath = dir.AsCString("");
}
}
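  // Merge the extra module search paths into the deserialized ones,
  // dropping duplicates while preserving the original order.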
llvm::StringMap<bool> processed;
// Add all deserialized paths to the map.
for (const auto &path : search_path_opts.ImportSearchPaths)
processed.insert({path, false});
// Add/unique all extra paths.
  for (const auto &path : module_search_paths) {
    auto it_notseen = processed.insert({path, false});
    if (it_notseen.second)
      search_path_opts.ImportSearchPaths.push_back(path);
  }
// This preserves the IsSystem bit, but deduplicates entries ignoring it.
processed.clear();
// Add all deserialized paths to the map.
for (const auto &path : search_path_opts.FrameworkSearchPaths)
processed.insert({path.Path, path.IsSystem});
// Add/unique all extra paths.
for (const auto &path : framework_search_paths) {
auto it_notseen = processed.insert(path);
if (it_notseen.second)
search_path_opts.FrameworkSearchPaths.push_back(
{path.first, path.second});
}
}
namespace lldb_private {
class ANSIColorStringStream : public llvm::raw_string_ostream {
public:
ANSIColorStringStream(bool colorize)
: llvm::raw_string_ostream(m_buffer), m_colorize(colorize) {}
/// Changes the foreground color of text that will be output from
/// this point forward.
/// \param Color ANSI color to use, the special SAVEDCOLOR can be
/// used to change only the bold attribute, and keep colors
/// untouched.
/// \param Bold bold/brighter text, default false
/// \param BG if true change the background,
/// default: change foreground
/// \returns itself so it can be used within << invocations.
virtual raw_ostream &changeColor(enum Colors colors, bool bold = false,
bool bg = false) {
if (llvm::sys::Process::ColorNeedsFlush())
flush();
const char *colorcode;
if (colors == SAVEDCOLOR)
colorcode = llvm::sys::Process::OutputBold(bg);
else
colorcode = llvm::sys::Process::OutputColor(colors, bold, bg);
if (colorcode) {
size_t len = strlen(colorcode);
write(colorcode, len);
}
return *this;
}
/// Resets the colors to terminal defaults. Call this when you are
/// done outputting colored text, or before program exit.
virtual raw_ostream &resetColor() {
if (llvm::sys::Process::ColorNeedsFlush())
flush();
const char *colorcode = llvm::sys::Process::ResetColor();
if (colorcode) {
size_t len = strlen(colorcode);
write(colorcode, len);
}
return *this;
}
  /// Reverses the foreground and background colors.
virtual raw_ostream &reverseColor() {
if (llvm::sys::Process::ColorNeedsFlush())
flush();
const char *colorcode = llvm::sys::Process::OutputReverse();
if (colorcode) {
size_t len = strlen(colorcode);
write(colorcode, len);
}
return *this;
}
/// This function determines if this stream is connected to a "tty"
/// or "console" window. That is, the output would be displayed to
/// the user rather than being put on a pipe or stored in a file.
virtual bool is_displayed() const { return m_colorize; }
/// This function determines if this stream is displayed and
/// supports colors.
virtual bool has_colors() const { return m_colorize; }
protected:
std::string m_buffer;
bool m_colorize;
};
class StoringDiagnosticConsumer : public swift::DiagnosticConsumer {
public:
StoringDiagnosticConsumer(SwiftASTContext &ast_context)
: m_ast_context(ast_context), m_diagnostics(), m_num_errors(0),
m_colorize(false) {
m_ast_context.GetDiagnosticEngine().resetHadAnyError();
m_ast_context.GetDiagnosticEngine().addConsumer(*this);
}
~StoringDiagnosticConsumer() {
m_ast_context.GetDiagnosticEngine().takeConsumers();
}
virtual void
handleDiagnostic(swift::SourceManager &source_mgr,
swift::SourceLoc source_loc, swift::DiagnosticKind kind,
llvm::StringRef formatString,
llvm::ArrayRef<swift::DiagnosticArgument> formatArgs,
const swift::DiagnosticInfo &info,
const swift::SourceLoc bufferIndirectlyCausingDiagnostic) {
llvm::StringRef bufferName = "<anonymous>";
unsigned bufferID = 0;
std::pair<unsigned, unsigned> line_col = {0, 0};
llvm::SmallString<256> text;
{
llvm::raw_svector_ostream out(text);
swift::DiagnosticEngine::formatDiagnosticText(out, formatString,
formatArgs);
}
if (source_loc.isValid()) {
bufferID = source_mgr.findBufferContainingLoc(source_loc);
bufferName = source_mgr.getDisplayNameForLoc(source_loc);
line_col = source_mgr.getLineAndColumn(source_loc);
}
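    // If the diagnostic has a resolvable source location, render it through
    // llvm::SourceMgr so it carries the usual caret, ranges, and fix-its;
    // otherwise fall back to the plain formatted text.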
if (line_col.first != 0) {
ANSIColorStringStream os(m_colorize);
// Determine what kind of diagnostic we're emitting, and whether
// we want to use its fixits:
bool use_fixits = false;
llvm::SourceMgr::DiagKind source_mgr_kind;
switch (kind) {
default:
case swift::DiagnosticKind::Error:
source_mgr_kind = llvm::SourceMgr::DK_Error;
use_fixits = true;
break;
case swift::DiagnosticKind::Warning:
source_mgr_kind = llvm::SourceMgr::DK_Warning;
break;
case swift::DiagnosticKind::Note:
source_mgr_kind = llvm::SourceMgr::DK_Note;
break;
}
// Translate ranges.
llvm::SmallVector<llvm::SMRange, 2> ranges;
for (auto R : info.Ranges)
ranges.push_back(getRawRange(source_mgr, R));
// Translate fix-its.
llvm::SmallVector<llvm::SMFixIt, 2> fix_its;
for (swift::DiagnosticInfo::FixIt F : info.FixIts)
fix_its.push_back(getRawFixIt(source_mgr, F));
// Display the diagnostic.
auto message = source_mgr.GetMessage(source_loc, source_mgr_kind, text,
ranges, fix_its);
source_mgr.getLLVMSourceMgr().PrintMessage(os, message);
// Use the llvm::raw_string_ostream::str() accessor as it will
// flush the stream into our "message" and return us a reference
// to "message".
std::string &message_ref = os.str();
if (message_ref.empty())
m_diagnostics.push_back(RawDiagnostic(
text.str(), kind, bufferName, bufferID, line_col.first,
line_col.second,
use_fixits ? info.FixIts
: llvm::ArrayRef<swift::Diagnostic::FixIt>()));
else
m_diagnostics.push_back(RawDiagnostic(
message_ref, kind, bufferName, bufferID, line_col.first,
line_col.second,
use_fixits ? info.FixIts
: llvm::ArrayRef<swift::Diagnostic::FixIt>()));
} else {
m_diagnostics.push_back(RawDiagnostic(
text.str(), kind, bufferName, bufferID, line_col.first,
line_col.second, llvm::ArrayRef<swift::Diagnostic::FixIt>()));
}
if (kind == swift::DiagnosticKind::Error)
m_num_errors++;
}
void Clear() {
m_ast_context.GetDiagnosticEngine().resetHadAnyError();
m_diagnostics.clear();
m_num_errors = 0;
}
unsigned NumErrors() {
if (m_num_errors)
return m_num_errors;
else if (m_ast_context.GetASTContext()->hadError())
return 1;
else
return 0;
}
static DiagnosticSeverity SeverityForKind(swift::DiagnosticKind kind) {
switch (kind) {
case swift::DiagnosticKind::Error:
return eDiagnosticSeverityError;
case swift::DiagnosticKind::Warning:
return eDiagnosticSeverityWarning;
case swift::DiagnosticKind::Note:
return eDiagnosticSeverityRemark;
case swift::DiagnosticKind::Remark:
break;
}
llvm_unreachable("Unhandled DiagnosticKind in switch.");
}
void PrintDiagnostics(DiagnosticManager &diagnostic_manager,
uint32_t bufferID = UINT32_MAX, uint32_t first_line = 0,
uint32_t last_line = UINT32_MAX) {
bool added_one_diagnostic = false;
for (const RawDiagnostic &diagnostic : m_diagnostics) {
// We often make expressions and wrap them in some code. When
// we see errors we want the line numbers to be correct so we
// correct them below. LLVM stores in SourceLoc objects as
// character offsets so there is no way to get LLVM to move its
// error line numbers around by adjusting the source location,
// we must do it manually. We also want to use the same error
// formatting as LLVM and Clang, so we must muck with the
// string.
const DiagnosticSeverity severity = SeverityForKind(diagnostic.kind);
const DiagnosticOrigin origin = eDiagnosticOriginSwift;
if (first_line > 0 && bufferID != UINT32_MAX &&
diagnostic.bufferID == bufferID && !diagnostic.bufferName.empty()) {
// Make sure the error line is in range.
if (diagnostic.line >= first_line && diagnostic.line <= last_line) {
// Need to remap the error/warning to a different line.
StreamString match;
match.Printf("%s:%u:", diagnostic.bufferName.str().c_str(),
diagnostic.line);
const size_t match_len = match.GetString().size();
size_t match_pos = diagnostic.description.find(match.GetString());
if (match_pos != std::string::npos) {
            // We have some "<file>:<line>:" instances that need to be updated.
StreamString fixed_description;
size_t start_pos = 0;
do {
              if (match_pos > start_pos)
                fixed_description.Printf(
                    "%s", diagnostic.description
                              .substr(start_pos, match_pos - start_pos)
                              .c_str());
fixed_description.Printf(
"%s:%u:", diagnostic.bufferName.str().c_str(),
diagnostic.line - first_line + 1);
start_pos = match_pos + match_len;
match_pos =
diagnostic.description.find(match.GetString(), start_pos);
} while (match_pos != std::string::npos);
// Append any last remaining text.
if (start_pos < diagnostic.description.size())
fixed_description.Printf(
"%s", diagnostic.description
.substr(start_pos,
diagnostic.description.size() - start_pos)
.c_str());
SwiftDiagnostic *new_diagnostic =
new SwiftDiagnostic(fixed_description.GetString().data(),
severity, origin, bufferID);
for (auto fixit : diagnostic.fixits)
new_diagnostic->AddFixIt(fixit);
diagnostic_manager.AddDiagnostic(new_diagnostic);
added_one_diagnostic = true;
continue;
}
}
}
}
// In general, we don't want to see diagnostics from outside of
// the source text range of the actual user expression. But if we
// didn't find any diagnostics in the text range, it's probably
// because the source range was not specified correctly, and we
// don't want to lose legit errors because of that. So in that
// case we'll add them all here:
if (!added_one_diagnostic) {
      // This will also report diagnostics whose source location lies
      // outside the expression's source range. They are usually less
      // interesting, but dropping them entirely would hide real errors.
for (const RawDiagnostic &diagnostic : m_diagnostics) {
const DiagnosticSeverity severity = SeverityForKind(diagnostic.kind);
const DiagnosticOrigin origin = eDiagnosticOriginSwift;
diagnostic_manager.AddDiagnostic(diagnostic.description.c_str(),
severity, origin);
}
}
}
bool GetColorize() const { return m_colorize; }
bool SetColorize(bool b) {
const bool old = m_colorize;
m_colorize = b;
return old;
}
private:
  // We don't currently use lldb_private::Diagnostic or any of the lldb
  // DiagnosticManager machinery to store diagnostics as they
  // occur. Instead, we store them in raw form using this struct, then
  // transcode them to SwiftDiagnostics in PrintDiagnostics().
struct RawDiagnostic {
RawDiagnostic(std::string in_desc, swift::DiagnosticKind in_kind,
llvm::StringRef in_bufferName, unsigned in_bufferID,
uint32_t in_line, uint32_t in_column,
llvm::ArrayRef<swift::Diagnostic::FixIt> in_fixits)
: description(in_desc), kind(in_kind), bufferName(in_bufferName),
bufferID(in_bufferID), line(in_line), column(in_column) {
for (auto fixit : in_fixits) {
fixits.push_back(fixit);
}
}
std::string description;
swift::DiagnosticKind kind;
const llvm::StringRef bufferName;
unsigned bufferID;
uint32_t line;
uint32_t column;
std::vector<swift::DiagnosticInfo::FixIt> fixits;
};
typedef std::vector<RawDiagnostic> RawDiagnosticBuffer;
SwiftASTContext &m_ast_context;
RawDiagnosticBuffer m_diagnostics;
unsigned m_num_errors = 0;
bool m_colorize;
};
} // namespace lldb_private
swift::ASTContext *SwiftASTContext::GetASTContext() {
assert(m_initialized_search_path_options &&
m_initialized_clang_importer_options &&
"search path options must be initialized before ClangImporter");
swift::DependencyTracker *tracker = nullptr;
if (m_ast_context_ap.get())
return m_ast_context_ap.get();
m_ast_context_ap.reset(
swift::ASTContext::get(GetLanguageOptions(), GetSearchPathOptions(),
GetSourceManager(), GetDiagnosticEngine()));
m_diagnostic_consumer_ap.reset(new StoringDiagnosticConsumer(*this));
if (getenv("LLDB_SWIFT_DUMP_DIAGS")) {
// NOTE: leaking a swift::PrintingDiagnosticConsumer() here, but
// this only gets enabled when the above environment variable is
// set.
GetDiagnosticEngine().addConsumer(*new swift::PrintingDiagnosticConsumer());
}
// Create the clang importer and determine the clang module cache path
std::string moduleCachePath = "";
std::unique_ptr<swift::ClangImporter> clang_importer_ap;
auto &clang_importer_options = GetClangImporterOptions();
if (!m_ast_context_ap->SearchPathOpts.SDKPath.empty() || TargetHasNoSDK()) {
if (!clang_importer_options.OverrideResourceDir.empty()) {
clang_importer_ap = swift::ClangImporter::create(*m_ast_context_ap,
clang_importer_options);
moduleCachePath = swift::getModuleCachePathFromClang(
clang_importer_ap->getClangInstance());
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Using clang module cache path: %s",
moduleCachePath.c_str());
}
}
// Compute the prebuilt module cache path to use:
// <resource-dir>/<platform>/prebuilt-modules
llvm::Triple triple(GetTriple());
llvm::SmallString<128> prebuiltModuleCachePath = GetResourceDir(triple);
StringRef platform = swift::getPlatformNameForTriple(triple);
llvm::sys::path::append(prebuiltModuleCachePath, platform,
"prebuilt-modules");
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Using prebuilt module cache path: %s",
prebuiltModuleCachePath.c_str());
// Determine the Swift module loading mode to use.
auto props = ModuleList::GetGlobalModuleListProperties();
swift::ModuleLoadingMode loading_mode;
switch (props.GetSwiftModuleLoadingMode()) {
case eSwiftModuleLoadingModePreferSerialized:
loading_mode = swift::ModuleLoadingMode::PreferSerialized;
break;
case eSwiftModuleLoadingModePreferParseable:
loading_mode = swift::ModuleLoadingMode::PreferParseable;
break;
case eSwiftModuleLoadingModeOnlySerialized:
loading_mode = swift::ModuleLoadingMode::OnlySerialized;
break;
case eSwiftModuleLoadingModeOnlyParseable:
loading_mode = swift::ModuleLoadingMode::OnlyParseable;
break;
}
// The order here matters due to fallback behaviors:
// 1. Create and install the memory buffer serialized module loader.
std::unique_ptr<swift::ModuleLoader> memory_buffer_loader_ap(
swift::MemoryBufferSerializedModuleLoader::create(*m_ast_context_ap,
tracker, loading_mode));
if (memory_buffer_loader_ap) {
m_memory_buffer_module_loader =
static_cast<swift::MemoryBufferSerializedModuleLoader *>(
memory_buffer_loader_ap.get());
m_ast_context_ap->addModuleLoader(std::move(memory_buffer_loader_ap));
}
// 2. Create and install the parseable interface module loader.
//
// TODO: It may be nice to reverse the order between PIML and SML in
// LLDB, since binary swift modules likely contain private
// types that the parseable interfaces are missing. On the
// other hand if we need to go looking for a module on disk,
// something is already screwed up in the debug info.
if (loading_mode != swift::ModuleLoadingMode::OnlySerialized) {
std::unique_ptr<swift::ModuleLoader> parseable_module_loader_ap(
swift::ParseableInterfaceModuleLoader::create(
*m_ast_context_ap, moduleCachePath, prebuiltModuleCachePath,
tracker, loading_mode));
if (parseable_module_loader_ap)
m_ast_context_ap->addModuleLoader(std::move(parseable_module_loader_ap));
}
// 3. Create and install the serialized module loader.
std::unique_ptr<swift::ModuleLoader> serialized_module_loader_ap(
swift::SerializedModuleLoader::create(*m_ast_context_ap, tracker,
loading_mode));
if (serialized_module_loader_ap)
m_ast_context_ap->addModuleLoader(std::move(serialized_module_loader_ap));
// 4. Install the clang importer.
if (clang_importer_ap) {
m_clang_importer = (swift::ClangImporter *)clang_importer_ap.get();
m_ast_context_ap->addModuleLoader(std::move(clang_importer_ap),
/*isClang=*/true);
}
// 5. Create and install the DWARF importer, but only for the module AST
// context.
if (!m_is_scratch_context) {
auto props = ModuleList::GetGlobalModuleListProperties();
if (props.GetUseDWARFImporter()) {
auto dwarf_importer_ap = swift::DWARFImporter::create(
*m_ast_context_ap, clang_importer_options);
if (dwarf_importer_ap) {
m_dwarf_importer = dwarf_importer_ap.get();
m_ast_context_ap->addModuleLoader(std::move(dwarf_importer_ap));
}
}
}
// Set up the required state for the evaluator in the TypeChecker.
registerTypeCheckerRequestFunctions(m_ast_context_ap->evaluator);
GetASTMap().Insert(m_ast_context_ap.get(), this);
VALID_OR_RETURN(nullptr);
return m_ast_context_ap.get();
}
swift::MemoryBufferSerializedModuleLoader *
SwiftASTContext::GetMemoryBufferModuleLoader() {
VALID_OR_RETURN(nullptr);
GetASTContext();
return m_memory_buffer_module_loader;
}
swift::ClangImporter *SwiftASTContext::GetClangImporter() {
VALID_OR_RETURN(nullptr);
GetASTContext();
return m_clang_importer;
}
bool SwiftASTContext::AddClangArgument(std::string clang_arg, bool unique) {
if (clang_arg.empty())
return false;
swift::ClangImporterOptions &importer_options = GetClangImporterOptions();
// Avoid inserting the same option twice.
if (unique)
for (std::string &arg : importer_options.ExtraArgs)
if (arg == clang_arg)
return false;
importer_options.ExtraArgs.push_back(clang_arg);
return true;
}
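// For example, AddClangArgument("-DFOO=1", /*unique=*/true) appends the flag
// to the ClangImporter's extra arguments; a second identical call with
// unique set is a no-op.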
bool SwiftASTContext::AddClangArgumentPair(StringRef clang_arg_1,
StringRef clang_arg_2) {
if (clang_arg_1.empty() || clang_arg_2.empty())
return false;
swift::ClangImporterOptions &importer_options = GetClangImporterOptions();
bool add_hmap = true;
for (ssize_t ai = 0, ae = importer_options.ExtraArgs.size() -
1; // -1 because we look at the next one too
ai < ae; ++ai) {
if (clang_arg_1.equals(importer_options.ExtraArgs[ai]) &&
clang_arg_2.equals(importer_options.ExtraArgs[ai + 1]))
return false;
}
importer_options.ExtraArgs.push_back(clang_arg_1);
importer_options.ExtraArgs.push_back(clang_arg_2);
return true;
}
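// For example, AddClangArgumentPair("-iquote", path) appends both strings
// unless that exact pair already appears consecutively in the extra
// arguments.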
const swift::SearchPathOptions *SwiftASTContext::GetSearchPathOptions() const {
VALID_OR_RETURN(0);
if (!m_ast_context_ap)
return nullptr;
return &m_ast_context_ap->SearchPathOpts;
}
const std::vector<std::string> &SwiftASTContext::GetClangArguments() {
return GetClangImporterOptions().ExtraArgs;
}
swift::ModuleDecl *
SwiftASTContext::GetCachedModule(const SourceModule &module) {
VALID_OR_RETURN(nullptr);
if (!module.path.size())
return nullptr;
SwiftModuleMap::const_iterator iter =
m_swift_module_cache.find(module.path.front().GetStringRef());
if (iter != m_swift_module_cache.end())
return iter->second;
return nullptr;
}
swift::ModuleDecl *SwiftASTContext::CreateModule(const SourceModule &module,
Status &error) {
VALID_OR_RETURN(nullptr);
if (!module.path.size()) {
error.SetErrorStringWithFormat("invalid module name (empty)");
return nullptr;
}
if (swift::ModuleDecl *module_decl = GetCachedModule(module)) {
error.SetErrorStringWithFormat("module already exists for \"%s\"",
module.path.front().GetCString());
return nullptr;
}
swift::ASTContext *ast = GetASTContext();
if (!ast) {
error.SetErrorStringWithFormat("invalid swift AST (nullptr)");
return nullptr;
}
swift::Identifier module_id(
ast->getIdentifier(module.path.front().GetCString()));
auto *module_decl = swift::ModuleDecl::create(module_id, *ast);
if (!module_decl) {
error.SetErrorStringWithFormat("failed to create module for \"%s\"",
module.path.front().GetCString());
return nullptr;
}
m_swift_module_cache.insert(
{module.path.front().GetStringRef(), module_decl});
return module_decl;
}
void SwiftASTContext::CacheModule(swift::ModuleDecl *module) {
VALID_OR_RETURN_VOID();
if (!module)
return;
auto ID = module->getName().get();
if (!ID || !ID[0])
return;
if (m_swift_module_cache.find(ID) != m_swift_module_cache.end())
return;
m_swift_module_cache.insert({ID, module});
}
swift::ModuleDecl *SwiftASTContext::GetModule(const SourceModule &module,
Status &error) {
VALID_OR_RETURN(nullptr);
if (!module.path.size())
return nullptr;
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\")",
module.path.front().AsCString("<no name>"));
if (module.path.front().IsEmpty()) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "empty module name");
error.SetErrorString("invalid module name (empty)");
return nullptr;
}
if (swift::ModuleDecl *module_decl = GetCachedModule(module))
return module_decl;
swift::ASTContext *ast = GetASTContext();
if (!ast) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\") invalid ASTContext",
module.path.front().GetCString());
error.SetErrorString("invalid swift::ASTContext");
return nullptr;
}
typedef std::pair<swift::Identifier, swift::SourceLoc> ModuleNameSpec;
llvm::StringRef module_basename_sref = module.path.front().GetStringRef();
ModuleNameSpec name_pair(ast->getIdentifier(module_basename_sref),
swift::SourceLoc());
if (HasFatalErrors()) {
error.SetErrorStringWithFormat("failed to get module \"%s\" from AST "
"context:\nAST context is in a fatal "
"error state",
module.path.front().GetCString());
return nullptr;
}
ClearDiagnostics();
swift::ModuleDecl *module_decl = ast->getModuleByName(module_basename_sref);
if (HasErrors()) {
DiagnosticManager diagnostic_manager;
PrintDiagnostics(diagnostic_manager);
error.SetErrorStringWithFormat(
"failed to get module \"%s\" from AST context:\n%s",
module.path.front().GetCString(),
diagnostic_manager.GetString().data());
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\") -- error: %s",
module.path.front().GetCString(),
diagnostic_manager.GetString().data());
return nullptr;
}
if (!module_decl) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "failed with no error",
module.path.front().GetCString());
error.SetErrorStringWithFormat(
"failed to get module \"%s\" from AST context",
module.path.front().GetCString());
return nullptr;
}
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\") -- found %s",
module.path.front().GetCString(),
module_decl->getName().str().str().c_str());
m_swift_module_cache[module.path.front().GetStringRef()] = module_decl;
return module_decl;
}
swift::ModuleDecl *SwiftASTContext::GetModule(const FileSpec &module_spec,
Status &error) {
VALID_OR_RETURN(nullptr);
ConstString module_basename(module_spec.GetFileNameStrippingExtension());
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\")", module_spec.GetPath().c_str());
if (module_basename) {
SwiftModuleMap::const_iterator iter =
m_swift_module_cache.find(module_basename.GetCString());
if (iter != m_swift_module_cache.end())
return iter->second;
if (FileSystem::Instance().Exists(module_spec)) {
swift::ASTContext *ast = GetASTContext();
if (!GetClangImporter()) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"((FileSpec)\"%s\") -- no ClangImporter so giving up",
module_spec.GetPath().c_str());
error.SetErrorStringWithFormat("couldn't get a ClangImporter");
return nullptr;
}
std::string module_directory(module_spec.GetDirectory().GetCString());
bool add_search_path = true;
for (auto path : ast->SearchPathOpts.ImportSearchPaths) {
if (path == module_directory) {
add_search_path = false;
break;
}
}
// Add the search path if needed so we can find the module by basename.
if (add_search_path)
ast->SearchPathOpts.ImportSearchPaths.push_back(
std::move(module_directory));
typedef std::pair<swift::Identifier, swift::SourceLoc> ModuleNameSpec;
llvm::StringRef module_basename_sref(module_basename.GetCString());
ModuleNameSpec name_pair(ast->getIdentifier(module_basename_sref),
swift::SourceLoc());
swift::ModuleDecl *module =
ast->getModule(llvm::ArrayRef<ModuleNameSpec>(name_pair));
if (module) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "((FileSpec)\"%s\") -- found %s",
module_spec.GetPath().c_str(),
module->getName().str().str().c_str());
m_swift_module_cache[module_basename.GetCString()] = module;
return module;
} else {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"((FileSpec)\"%s\") -- couldn't get from AST context",
module_spec.GetPath().c_str());
error.SetErrorStringWithFormat(
"failed to get module \"%s\" from AST context",
module_basename.GetCString());
}
} else {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "((FileSpec)\"%s\") -- doesn't exist",
module_spec.GetPath().c_str());
error.SetErrorStringWithFormat("module \"%s\" doesn't exist",
module_spec.GetPath().c_str());
}
} else {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "((FileSpec)\"%s\") -- no basename",
module_spec.GetPath().c_str());
error.SetErrorStringWithFormat("no module basename in \"%s\"",
module_spec.GetPath().c_str());
}
return NULL;
}
swift::ModuleDecl *
SwiftASTContext::FindAndLoadModule(const SourceModule &module, Process &process,
Status &error) {
VALID_OR_RETURN(nullptr);
swift::ModuleDecl *swift_module = GetModule(module, error);
if (!swift_module)
return nullptr;
LoadModule(swift_module, process, error);
return swift_module;
}
swift::ModuleDecl *
SwiftASTContext::FindAndLoadModule(const FileSpec &module_spec,
Process &process, Status &error) {
VALID_OR_RETURN(nullptr);
swift::ModuleDecl *swift_module = GetModule(module_spec, error);
if (!swift_module)
return nullptr;
LoadModule(swift_module, process, error);
return swift_module;
}
bool SwiftASTContext::LoadOneImage(Process &process, FileSpec &link_lib_spec,
Status &error) {
VALID_OR_RETURN(false);
error.Clear();
PlatformSP platform_sp = process.GetTarget().GetPlatform();
if (platform_sp)
return platform_sp->LoadImage(&process, FileSpec(), link_lib_spec, error) !=
LLDB_INVALID_IMAGE_TOKEN;
else
return false;
}
static std::vector<std::string>
GetLibrarySearchPaths(const swift::SearchPathOptions &search_path_opts) {
// The order in which we look up the libraries is important. The REPL
// dlopen()s libswiftCore, and gives precedence to the just built standard
// library instead of the one in the OS. When we type `import Foundation`,
// we want to make sure we end up loading the correct library, i.e. the
// one sitting next to the stdlib we just built, and then fall back to the
// one in the OS if that's not available.
std::vector<std::string> paths;
if (!search_path_opts.RuntimeLibraryPath.empty())
paths.push_back(search_path_opts.RuntimeLibraryPath);
for (std::string path : search_path_opts.LibrarySearchPaths)
paths.push_back(path);
return paths;
}
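/// Load all link libraries (frameworks and plain libraries) required by
/// \p swift_module and its visible imports into \p process, collecting any
/// dlopen failures in \p error. (Descriptive comment added.)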
void SwiftASTContext::LoadModule(swift::ModuleDecl *swift_module,
Process &process, Status &error) {
VALID_OR_RETURN_VOID();
Status current_error;
auto addLinkLibrary = [&](swift::LinkLibrary link_lib) {
Status load_image_error;
StreamString all_dlopen_errors;
const char *library_name = link_lib.getName().data();
if (library_name == NULL || library_name[0] == '\0') {
error.SetErrorString("Empty library name passed to addLinkLibrary");
return;
}
SwiftLanguageRuntime *runtime = process.GetSwiftLanguageRuntime();
if (runtime && runtime->IsInLibraryNegativeCache(library_name))
return;
swift::LibraryKind library_kind = link_lib.getKind();
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Loading link library \"%s\" of kind: %d.",
library_name, library_kind);
switch (library_kind) {
case swift::LibraryKind::Framework: {
// First make sure the library isn't already loaded. Since this
// is a framework, we make sure the file name and the framework
// name are the same, and that we are contained in
// FileName.framework with no other intervening frameworks. We
// can get more restrictive if this gives false positives.
ConstString library_cstr(library_name);
std::string framework_name(library_name);
framework_name.append(".framework");
// Lookup the module by file basename and make sure that
// basename has "<basename>.framework" in the path.
ModuleSpec module_spec;
module_spec.GetFileSpec().GetFilename() = library_cstr;
lldb_private::ModuleList matching_module_list;
bool module_already_loaded = false;
if (process.GetTarget().GetImages().FindModules(module_spec,
matching_module_list)) {
matching_module_list.ForEach(
[&module_already_loaded, &module_spec,
&framework_name](const ModuleSP &module_sp) -> bool {
module_already_loaded = module_spec.GetFileSpec().GetPath().find(
framework_name) != std::string::npos;
              // Keep iterating if we didn't find the right module.
              return !module_already_loaded;
            });
}
// If we already have this library loaded, don't try and load it again.
if (module_already_loaded) {
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"Skipping load of %s as it is already loaded.",
framework_name.c_str());
return;
}
for (auto module : process.GetTarget().GetImages().Modules()) {
FileSpec module_file = module->GetFileSpec();
if (module_file.GetFilename() == library_cstr) {
std::string module_path = module_file.GetPath();
size_t framework_offset = module_path.rfind(framework_name);
if (framework_offset != std::string::npos) {
// The Framework is already loaded, so we don't need to try to load
// it again.
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"Skipping load of %s as it is already loaded.",
framework_name.c_str());
return;
}
}
}
std::string framework_path("@rpath/");
framework_path.append(library_name);
framework_path.append(".framework/");
framework_path.append(library_name);
FileSpec framework_spec(framework_path.c_str());
if (LoadOneImage(process, framework_spec, load_image_error)) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Found framework at: %s.",
framework_path.c_str());
return;
} else
all_dlopen_errors.Printf("Looking for \"%s\", error: %s\n",
framework_path.c_str(),
load_image_error.AsCString());
// And then in the various framework search paths.
std::unordered_set<std::string> seen_paths;
std::vector<std::string> uniqued_paths;
for (const auto &framework_search_dir :
swift_module->getASTContext().SearchPathOpts.FrameworkSearchPaths) {
// The framework search dir as it comes from the AST context
// often has duplicate entries, don't try to load along the
// same path twice.
std::pair<std::unordered_set<std::string>::iterator, bool>
insert_result = seen_paths.insert(framework_search_dir.Path);
if (insert_result.second) {
framework_path = framework_search_dir.Path;
framework_path.append("/");
framework_path.append(library_name);
framework_path.append(".framework/");
uniqued_paths.push_back(framework_path);
}
}
    uint32_t token = LLDB_INVALID_IMAGE_TOKEN;
    PlatformSP platform_sp = process.GetTarget().GetPlatform();
    // Use a distinctly named status here so we don't shadow the captured
    // \p error.
    Status platform_error;
    FileSpec library_spec(library_name);
    FileSpec found_path;
    if (platform_sp)
      token = platform_sp->LoadImageUsingPaths(
          &process, library_spec, uniqued_paths, platform_error, &found_path);
    if (token != LLDB_INVALID_IMAGE_TOKEN) {
      // Report the path that was actually found, not the last candidate.
      LOG_PRINTF(LIBLLDB_LOG_TYPES, "Found framework at: %s.",
                 found_path.GetCString());
return;
} else {
all_dlopen_errors.Printf("Failed to find framework for \"%s\" looking"
" along paths:\n",
library_name);
for (const std::string &path : uniqued_paths)
all_dlopen_errors.Printf(" %s\n", path.c_str());
}
// Maybe we were told to add a link library that exists in the
// system. I tried just specifying Foo.framework/Foo and
// letting the system search figure that out, but if
// DYLD_FRAMEWORK_FALLBACK_PATH is set (e.g. in Xcode's test
// scheme) then these aren't found. So for now I dial them in
// explicitly:
std::string system_path("/System/Library/Frameworks/");
system_path.append(library_name);
system_path.append(".framework/");
system_path.append(library_name);
framework_spec.SetFile(system_path.c_str(), FileSpec::Style::native);
if (LoadOneImage(process, framework_spec, load_image_error))
return;
else
all_dlopen_errors.Printf("Looking for \"%s\"\n, error: %s\n",
framework_path.c_str(),
load_image_error.AsCString());
} break;
case swift::LibraryKind::Library: {
std::vector<std::string> search_paths =
GetLibrarySearchPaths(swift_module->getASTContext().SearchPathOpts);
if (LoadLibraryUsingPaths(process, library_name, search_paths, true,
all_dlopen_errors))
return;
} break;
}
// If we get here, we aren't going to find this image, so add it to a
// negative cache:
if (runtime)
runtime->AddToLibraryNegativeCache(library_name);
current_error.SetErrorStringWithFormat(
"Failed to load linked library %s of module %s - errors:\n%s\n",
library_name, swift_module->getName().str().str().c_str(),
all_dlopen_errors.GetData());
};
swift_module->forAllVisibleModules(
{}, [&](swift::ModuleDecl::ImportedModule import) {
import.second->collectLinkLibraries(addLinkLibrary);
return true;
});
error = current_error;
}
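/// Attempt to dlopen \p library_name in \p process, trying each entry in
/// \p search_paths (and optionally @rpath), unless it is the Swift standard
/// library or is already loaded. (Descriptive comment added.)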
bool SwiftASTContext::LoadLibraryUsingPaths(
Process &process, llvm::StringRef library_name,
std::vector<std::string> &search_paths, bool check_rpath,
StreamString &all_dlopen_errors) {
VALID_OR_RETURN(false);
SwiftLanguageRuntime *runtime = process.GetSwiftLanguageRuntime();
if (!runtime) {
all_dlopen_errors.PutCString(
"Can't load Swift libraries without a language runtime.");
return false;
}
if (ConstString::Equals(runtime->GetStandardLibraryBaseName(),
ConstString(library_name))) {
// Never dlopen the standard library. Some binaries statically
// link to the Swift standard library and dlopening it here will
// cause ObjC runtime conflicts. If you want to run Swift
// expressions you have to arrange to load the Swift standard
// library by hand before doing so.
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"Skipping swift standard library \"%s\" - we don't hand load "
"that one.",
runtime->GetStandardLibraryBaseName().AsCString());
return true;
}
PlatformSP platform_sp(process.GetTarget().GetPlatform());
std::string library_fullname;
if (platform_sp) {
library_fullname =
platform_sp->GetFullNameForDylib(ConstString(library_name)).AsCString();
  } else {
    // This is the old way, and we shouldn't use it except on Mac OS.
#ifdef __APPLE__
library_fullname = "lib";
library_fullname.append(library_name);
library_fullname.append(".dylib");
#else
return false;
#endif
}
ModuleSpec module_spec;
module_spec.GetFileSpec().GetFilename().SetCString(library_fullname.c_str());
lldb_private::ModuleList matching_module_list;
if (process.GetTarget().GetImages().FindModules(module_spec,
matching_module_list) > 0) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Skipping module %s as it is already loaded.",
library_fullname.c_str());
return true;
}
std::string library_path;
std::unordered_set<std::string> seen_paths;
Status load_image_error;
std::vector<std::string> uniqued_paths;
for (const std::string &library_search_dir : search_paths) {
// The library search dir as it comes from the AST context often
// has duplicate entries, so lets unique the path list before we
// send it down to the target.
std::pair<std::unordered_set<std::string>::iterator, bool> insert_result =
seen_paths.insert(library_search_dir);
if (insert_result.second)
uniqued_paths.push_back(library_search_dir);
}
FileSpec library_spec(library_fullname);
FileSpec found_library;
uint32_t token = LLDB_INVALID_IMAGE_TOKEN;
Status error;
if (platform_sp)
token = platform_sp->LoadImageUsingPaths(
&process, library_spec, uniqued_paths, error, &found_library);
if (token != LLDB_INVALID_IMAGE_TOKEN) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Found library at: %s.",
found_library.GetCString());
return true;
} else {
all_dlopen_errors.Printf("Failed to find \"%s\" in paths:\n,",
library_fullname.c_str());
for (const std::string &search_dir : uniqued_paths)
all_dlopen_errors.Printf(" %s\n", search_dir.c_str());
}
if (check_rpath) {
// Let our RPATH help us out when finding the right library.
library_path = "@rpath/";
library_path += library_fullname;
FileSpec link_lib_spec(library_path.c_str());
if (LoadOneImage(process, link_lib_spec, load_image_error)) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "Found library using RPATH at: %s.",
library_path.c_str());
return true;
} else
all_dlopen_errors.Printf("Failed to find \"%s\" on RPATH, error: %s\n",
library_fullname.c_str(),
load_image_error.AsCString());
}
return false;
}
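/// Load any additional plain link libraries recorded in the IRGen options;
/// frameworks are skipped here because they record their link libraries
/// properly. (Descriptive comment added.)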
void SwiftASTContext::LoadExtraDylibs(Process &process, Status &error) {
VALID_OR_RETURN_VOID();
error.Clear();
swift::IRGenOptions &irgen_options = GetIRGenOptions();
for (const swift::LinkLibrary &link_lib : irgen_options.LinkLibraries) {
// We don't have to do frameworks here, they actually record their link
// libraries properly.
if (link_lib.getKind() == swift::LibraryKind::Library) {
const char *library_name = link_lib.getName().data();
StreamString errors;
std::vector<std::string> search_paths = GetLibrarySearchPaths(
m_compiler_invocation_ap->getSearchPathOptions());
bool success = LoadLibraryUsingPaths(process, library_name, search_paths,
false, errors);
if (!success) {
error.SetErrorString(errors.GetData());
}
}
}
}
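/// Return a brief, human-readable description of \p module for logging.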
static std::string GetBriefModuleName(Module &module) {
StreamString ss;
module.GetDescription(&ss, eDescriptionLevelBrief);
ss.Flush();
return ss.GetString().str();
}
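/// Scan \p module for serialized Swift ASTs (either in a dedicated Swift
/// modules section or in AST blobs provided by the symbol vendor) and append
/// the names of the modules they define to \p module_names.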
bool SwiftASTContext::RegisterSectionModules(
Module &module, std::vector<std::string> &module_names) {
VALID_OR_RETURN(false);
swift::MemoryBufferSerializedModuleLoader *loader =
GetMemoryBufferModuleLoader();
if (!loader)
return false;
SectionList *section_list = module.GetSectionList();
if (!section_list)
return false;
SectionSP section_sp(
section_list->FindSectionByType(eSectionTypeSwiftModules, true));
if (section_sp) {
DataExtractor section_data;
if (section_sp->GetSectionData(section_data)) {
llvm::StringRef section_data_ref(
(const char *)section_data.GetDataStart(),
section_data.GetByteSize());
llvm::SmallVector<std::string, 4> llvm_modules;
if (swift::parseASTSection(*loader, section_data_ref, llvm_modules)) {
for (auto module_name : llvm_modules)
module_names.push_back(module_name);
return true;
}
}
} else {
if (m_ast_file_data_map.find(&module) != m_ast_file_data_map.end())
return true;
SymbolVendor *sym_vendor = module.GetSymbolVendor();
if (sym_vendor) {
// Grab all the AST blobs from the symbol vendor.
auto ast_file_datas = sym_vendor->GetASTData(eLanguageTypeSwift);
LOG_PRINTF(
LIBLLDB_LOG_TYPES,
"(\"%s\") retrieved %zu AST Data blobs from the symbol vendor.",
GetBriefModuleName(module).c_str(), ast_file_datas.size());
// Add each of the AST blobs to the vector of AST blobs for
// the module.
auto &ast_vector = GetASTVectorForModule(&module);
ast_vector.insert(ast_vector.end(), ast_file_datas.begin(),
ast_file_datas.end());
// Retrieve the module names from the AST blobs retrieved
// from the symbol vendor.
size_t parse_fail_count = 0;
size_t ast_number = 0;
for (auto ast_file_data_sp : ast_file_datas) {
// Parse the AST section info from the AST blob.
++ast_number;
llvm::StringRef section_data_ref(
(const char *)ast_file_data_sp->GetBytes(),
ast_file_data_sp->GetByteSize());
llvm::SmallVector<std::string, 4> swift_modules;
if (swift::parseASTSection(*loader, section_data_ref, swift_modules)) {
// Collect the Swift module names referenced by the AST.
for (auto module_name : swift_modules) {
module_names.push_back(module_name);
LOG_PRINTF(
LIBLLDB_LOG_TYPES,
"parsed module \"%s\" from Swift AST section %zu of %zu.",
module_name.c_str(), ast_number, ast_file_datas.size());
}
} else {
// Keep track of the fact that we failed to parse the AST section
// info.
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"failed to parse AST section %zu of %zu.", ast_number,
ast_file_datas.size());
++parse_fail_count;
}
}
if (!ast_file_datas.empty() && (parse_fail_count == 0)) {
// We found AST data entries and we successfully parsed all of them.
return true;
}
}
}
return false;
}
void SwiftASTContext::ValidateSectionModules(
Module &module, const std::vector<std::string> &module_names) {
VALID_OR_RETURN_VOID();
Status error;
for (const std::string &module_name : module_names) {
SourceModule module_info;
module_info.path.push_back(ConstString(module_name));
if (!GetModule(module_info, error))
module.ReportWarning("unable to load swift module \"%s\" (%s)",
module_name.c_str(), error.AsCString());
}
}
swift::Identifier SwiftASTContext::GetIdentifier(const char *name) {
VALID_OR_RETURN(swift::Identifier());
return GetASTContext()->getIdentifier(llvm::StringRef(name));
}
swift::Identifier SwiftASTContext::GetIdentifier(const llvm::StringRef &name) {
VALID_OR_RETURN(swift::Identifier());
return GetASTContext()->getIdentifier(name);
}
ConstString SwiftASTContext::GetMangledTypeName(swift::TypeBase *type_base) {
VALID_OR_RETURN(ConstString());
auto iter = m_type_to_mangled_name_map.find(type_base),
end = m_type_to_mangled_name_map.end();
if (iter != end)
return ConstString(iter->second);
swift::Type swift_type(type_base);
assert(!swift_type->hasArchetype() &&
"type has not been mapped out of context");
swift::Mangle::ASTMangler mangler(true);
std::string s = mangler.mangleTypeForDebugger(swift_type, nullptr);
if (s.empty())
return ConstString();
ConstString mangled_cs(s.c_str());
CacheDemangledType(mangled_cs.AsCString(), type_base);
return mangled_cs;
}
void SwiftASTContext::CacheDemangledType(const char *name,
swift::TypeBase *found_type) {
VALID_OR_RETURN_VOID();
m_type_to_mangled_name_map.insert(std::make_pair(found_type, name));
m_mangled_name_to_type_map.insert(std::make_pair(name, found_type));
}
void SwiftASTContext::CacheDemangledTypeFailure(const char *name) {
VALID_OR_RETURN_VOID();
m_negative_type_cache.Insert(name);
}
/// The old TypeReconstruction implementation would reconstruct SILFunctionTypes
/// with one argument T and one result U as an AST FunctionType (T) -> U;
/// anything with multiple arguments or results was reconstructed as () -> ().
///
/// Since this is non-sensical, let's just reconstruct all SILFunctionTypes as
/// () -> () for now.
///
/// What we should really do is only mangle AST types in DebugInfo, but that
/// requires some more plumbing on the Swift side to properly handle generic
/// specializations.
swift::Type convertSILFunctionTypesToASTFunctionTypes(swift::Type t) {
return t.transform([](swift::Type t) -> swift::Type {
    if (t->is<swift::SILFunctionType>())
return swift::FunctionType::get({}, t->getASTContext().TheEmptyTupleType);
return t;
});
}
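/// Reconstruct a CompilerType from a Swift mangled type name, consulting the
/// positive and negative demangling caches first. (Descriptive comment
/// added.)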
CompilerType
SwiftASTContext::GetTypeFromMangledTypename(const char *mangled_typename,
Status &error) {
VALID_OR_RETURN(CompilerType());
if (!mangled_typename ||
!SwiftLanguageRuntime::IsSwiftMangledName(mangled_typename)) {
error.SetErrorStringWithFormat(
"typename \"%s\" is not a valid Swift mangled name", mangled_typename);
return {};
}
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\")", mangled_typename);
swift::ASTContext *ast_ctx = GetASTContext();
if (!ast_ctx) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\") -- null Swift AST Context",
mangled_typename);
error.SetErrorString("null Swift AST Context");
return {};
}
error.Clear();
// If we were to crash doing this, remember what type caused it.
llvm::PrettyStackTraceFormat PST("error finding type for %s",
mangled_typename);
ConstString mangled_name(mangled_typename);
swift::TypeBase *found_type =
m_mangled_name_to_type_map.lookup(mangled_name.GetCString());
if (found_type) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\") -- found in the positive cache",
mangled_typename);
assert(&found_type->getASTContext() == ast_ctx);
return {found_type};
}
if (m_negative_type_cache.Lookup(mangled_name.GetCString())) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\") -- found in the negative cache",
mangled_typename);
return {};
}
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\") -- not cached, searching",
mangled_typename);
found_type = swift::Demangle::getTypeForMangling(*ast_ctx, mangled_typename)
.getPointer();
if (found_type) {
found_type =
convertSILFunctionTypesToASTFunctionTypes(found_type).getPointer();
CacheDemangledType(mangled_name.GetCString(), found_type);
CompilerType result_type(found_type);
assert(&found_type->getASTContext() == ast_ctx);
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\") -- found %s", mangled_typename,
result_type.GetTypeName().GetCString());
return result_type;
}
LOG_PRINTF(LIBLLDB_LOG_TYPES, "(\"%s\")", mangled_typename);
error.SetErrorStringWithFormat("type for typename \"%s\" was not found",
mangled_typename);
CacheDemangledTypeFailure(mangled_name.GetCString());
return {};
}
CompilerType SwiftASTContext::GetAnyObjectType() {
VALID_OR_RETURN(CompilerType());
swift::ASTContext *ast = GetASTContext();
return {ast->getAnyObjectType()};
}
CompilerType SwiftASTContext::GetVoidFunctionType() {
VALID_OR_RETURN(CompilerType());
if (!m_void_function_type) {
swift::ASTContext *ast = GetASTContext();
swift::Type empty_tuple_type(swift::TupleType::getEmpty(*ast));
m_void_function_type = {swift::FunctionType::get({}, empty_tuple_type)};
}
return m_void_function_type;
}
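/// Turn a swift::ValueDecl that names a type (a type alias or a nominal
/// type) into the corresponding CompilerType. (Descriptive comment added.)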
static CompilerType ValueDeclToType(swift::ValueDecl *decl,
swift::ASTContext *ast) {
if (decl) {
switch (decl->getKind()) {
case swift::DeclKind::TypeAlias: {
swift::TypeAliasDecl *alias_decl =
swift::cast<swift::TypeAliasDecl>(decl);
if (alias_decl->hasInterfaceType()) {
swift::Type swift_type = swift::TypeAliasType::get(
alias_decl, swift::Type(), swift::SubstitutionMap(),
alias_decl->getUnderlyingTypeLoc().getType());
return {swift_type.getPointer()};
}
break;
}
case swift::DeclKind::Enum:
case swift::DeclKind::Struct:
case swift::DeclKind::Protocol:
case swift::DeclKind::Class: {
swift::NominalTypeDecl *nominal_decl =
swift::cast<swift::NominalTypeDecl>(decl);
if (nominal_decl->hasInterfaceType()) {
swift::Type swift_type = nominal_decl->getDeclaredType();
return {swift_type.getPointer()};
}
} break;
default:
break;
}
}
return CompilerType();
}
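/// Look up a type by a "Module.TypeName" qualified name in the cached
/// modules. (Descriptive comment added.)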
CompilerType SwiftASTContext::FindQualifiedType(const char *qualified_name) {
VALID_OR_RETURN(CompilerType());
if (qualified_name && qualified_name[0]) {
const char *dot_pos = strchr(qualified_name, '.');
if (dot_pos) {
ConstString module_name(qualified_name, dot_pos - qualified_name);
SourceModule module_info;
module_info.path.push_back(module_name);
swift::ModuleDecl *swift_module = GetCachedModule(module_info);
if (swift_module) {
swift::ModuleDecl::AccessPathTy access_path;
llvm::SmallVector<swift::ValueDecl *, 4> decls;
const char *module_type_name = dot_pos + 1;
swift_module->lookupValue(access_path, GetIdentifier(module_type_name),
swift::NLKind::UnqualifiedLookup, decls);
for (auto decl : decls) {
CompilerType type = ValueDeclToType(decl, GetASTContext());
if (type)
return type;
}
}
}
}
return {};
}
static CompilerType DeclToType(swift::Decl *decl, swift::ASTContext *ast) {
if (swift::ValueDecl *value_decl =
swift::dyn_cast_or_null<swift::ValueDecl>(decl))
return ValueDeclToType(value_decl, ast);
return {};
}
static SwiftASTContext::TypeOrDecl DeclToTypeOrDecl(swift::ASTContext *ast,
swift::Decl *decl) {
if (decl) {
switch (decl->getKind()) {
case swift::DeclKind::Import:
case swift::DeclKind::Extension:
case swift::DeclKind::PatternBinding:
case swift::DeclKind::TopLevelCode:
case swift::DeclKind::GenericTypeParam:
case swift::DeclKind::AssociatedType:
case swift::DeclKind::EnumElement:
case swift::DeclKind::EnumCase:
case swift::DeclKind::IfConfig:
case swift::DeclKind::Param:
case swift::DeclKind::Module:
case swift::DeclKind::MissingMember:
break;
case swift::DeclKind::InfixOperator:
case swift::DeclKind::PrefixOperator:
case swift::DeclKind::PostfixOperator:
case swift::DeclKind::PrecedenceGroup:
return decl;
case swift::DeclKind::TypeAlias: {
swift::TypeAliasDecl *alias_decl =
swift::cast<swift::TypeAliasDecl>(decl);
if (alias_decl->hasInterfaceType()) {
swift::Type swift_type = swift::TypeAliasType::get(
alias_decl, swift::Type(), swift::SubstitutionMap(),
alias_decl->getUnderlyingTypeLoc().getType());
return CompilerType(swift_type.getPointer());
}
} break;
case swift::DeclKind::Enum:
case swift::DeclKind::Struct:
case swift::DeclKind::Class:
case swift::DeclKind::Protocol: {
swift::NominalTypeDecl *nominal_decl =
swift::cast<swift::NominalTypeDecl>(decl);
if (nominal_decl->hasInterfaceType()) {
swift::Type swift_type = nominal_decl->getDeclaredType();
return CompilerType(swift_type.getPointer());
}
} break;
case swift::DeclKind::Func:
case swift::DeclKind::Var:
return decl;
case swift::DeclKind::Subscript:
case swift::DeclKind::Constructor:
case swift::DeclKind::Destructor:
break;
case swift::DeclKind::Accessor:
case swift::DeclKind::PoundDiagnostic:
break;
}
}
return CompilerType();
}
size_t
SwiftASTContext::FindContainedTypeOrDecl(llvm::StringRef name,
TypeOrDecl container_type_or_decl,
TypesOrDecls &results, bool append) {
VALID_OR_RETURN(0);
if (!append)
results.clear();
size_t size_before = results.size();
CompilerType container_type = container_type_or_decl.Apply<CompilerType>(
[](CompilerType type) -> CompilerType { return type; },
[this](swift::Decl *decl) -> CompilerType {
return DeclToType(decl, GetASTContext());
});
  if (!name.empty() &&
llvm::dyn_cast_or_null<SwiftASTContext>(container_type.GetTypeSystem())) {
swift::Type swift_type = GetSwiftType(container_type);
if (!swift_type)
return 0;
swift::CanType swift_can_type(swift_type->getCanonicalType());
swift::NominalType *nominal_type =
swift_can_type->getAs<swift::NominalType>();
if (!nominal_type)
return 0;
swift::NominalTypeDecl *nominal_decl = nominal_type->getDecl();
llvm::ArrayRef<swift::ValueDecl *> decls = nominal_decl->lookupDirect(
swift::DeclName(m_ast_context_ap->getIdentifier(name)));
for (auto decl : decls)
results.emplace(DeclToTypeOrDecl(GetASTContext(), decl));
}
return results.size() - size_before;
}
CompilerType SwiftASTContext::FindType(const char *name,
swift::ModuleDecl *swift_module) {
VALID_OR_RETURN(CompilerType());
std::set<CompilerType> search_results;
FindTypes(name, swift_module, search_results, false);
if (search_results.empty())
return {};
else
return *search_results.begin();
}
llvm::Optional<SwiftASTContext::TypeOrDecl>
SwiftASTContext::FindTypeOrDecl(const char *name,
swift::ModuleDecl *swift_module) {
VALID_OR_RETURN(llvm::Optional<SwiftASTContext::TypeOrDecl>());
TypesOrDecls search_results;
FindTypesOrDecls(name, swift_module, search_results, false);
if (search_results.empty())
return llvm::Optional<SwiftASTContext::TypeOrDecl>();
else
return *search_results.begin();
}
size_t SwiftASTContext::FindTypes(const char *name,
swift::ModuleDecl *swift_module,
std::set<CompilerType> &results,
bool append) {
VALID_OR_RETURN(0);
if (!append)
results.clear();
size_t before = results.size();
TypesOrDecls types_or_decls_results;
FindTypesOrDecls(name, swift_module, types_or_decls_results);
for (const auto &result : types_or_decls_results) {
CompilerType type = result.Apply<CompilerType>(
[](CompilerType type) -> CompilerType { return type; },
[this](swift::Decl *decl) -> CompilerType {
if (swift::ValueDecl *value_decl =
swift::dyn_cast_or_null<swift::ValueDecl>(decl)) {
if (value_decl->hasInterfaceType()) {
swift::Type swift_type = value_decl->getInterfaceType();
swift::MetatypeType *meta_type =
swift_type->getAs<swift::MetatypeType>();
swift::ASTContext *ast = GetASTContext();
if (meta_type)
return {meta_type->getInstanceType().getPointer()};
else
return {swift_type.getPointer()};
}
}
return CompilerType();
});
results.emplace(type);
}
return results.size() - before;
}
size_t SwiftASTContext::FindTypesOrDecls(const char *name,
swift::ModuleDecl *swift_module,
TypesOrDecls &results, bool append) {
VALID_OR_RETURN(0);
if (!append)
results.clear();
size_t before = results.size();
if (name && name[0] && swift_module) {
swift::ModuleDecl::AccessPathTy access_path;
llvm::SmallVector<swift::ValueDecl *, 4> value_decls;
swift::Identifier identifier(GetIdentifier(name));
if (strchr(name, '.'))
swift_module->lookupValue(access_path, identifier,
swift::NLKind::QualifiedLookup, value_decls);
else
swift_module->lookupValue(access_path, identifier,
swift::NLKind::UnqualifiedLookup, value_decls);
if (identifier.isOperator()) {
swift::OperatorDecl *op_decl =
swift_module->lookupPrefixOperator(identifier);
if (op_decl)
results.emplace(DeclToTypeOrDecl(GetASTContext(), op_decl));
if ((op_decl = swift_module->lookupInfixOperator(identifier)))
results.emplace(DeclToTypeOrDecl(GetASTContext(), op_decl));
if ((op_decl = swift_module->lookupPostfixOperator(identifier)))
results.emplace(DeclToTypeOrDecl(GetASTContext(), op_decl));
}
if (swift::PrecedenceGroupDecl *pg_decl =
swift_module->lookupPrecedenceGroup(identifier))
results.emplace(DeclToTypeOrDecl(GetASTContext(), pg_decl));
for (auto decl : value_decls)
results.emplace(DeclToTypeOrDecl(GetASTContext(), decl));
}
return results.size() - before;
}
size_t SwiftASTContext::FindType(const char *name,
std::set<CompilerType> &results, bool append) {
VALID_OR_RETURN(0);
if (!append)
results.clear();
auto iter = m_swift_module_cache.begin(), end = m_swift_module_cache.end();
size_t count = 0;
std::function<void(swift::ModuleDecl *)> lookup_func =
[this, name, &results, &count](swift::ModuleDecl *module) -> void {
CompilerType candidate(this->FindType(name, module));
if (candidate) {
++count;
results.insert(candidate);
}
};
for (; iter != end; iter++)
lookup_func(iter->second);
if (m_scratch_module)
lookup_func(m_scratch_module);
return count;
}
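/// Import a Swift \p type from another SwiftASTContext into this one by
/// round-tripping through its mangled name; clang types cannot be imported.
/// (Descriptive comment added.)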
CompilerType SwiftASTContext::ImportType(CompilerType &type, Status &error) {
VALID_OR_RETURN(CompilerType());
  if (!m_ast_context_ap)
return CompilerType();
SwiftASTContext *swift_ast_ctx =
llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem());
if (swift_ast_ctx == nullptr) {
error.SetErrorString("Can't import clang type into a Swift ASTContext.");
return CompilerType();
} else if (swift_ast_ctx == this) {
// This is the same AST context, so the type is already imported.
return type;
}
// For now we're going to do this all using mangled names. If we
// find that is too slow, we can use the TypeBase * in the
// CompilerType to match this to the version of the type we got from
// the mangled name in the original swift::ASTContext.
ConstString mangled_name(type.GetMangledTypeName());
if (mangled_name) {
swift::TypeBase *our_type_base =
m_mangled_name_to_type_map.lookup(mangled_name.GetCString());
if (our_type_base)
return {our_type_base};
else {
      // Use a local status so a failed lookup here doesn't shadow or clobber
      // the caller's error.
      Status mangled_error;
      CompilerType our_type(
          GetTypeFromMangledTypename(mangled_name.GetCString(), mangled_error));
      if (mangled_error.Success())
        return our_type;
}
}
return {};
}
swift::IRGenDebugInfoLevel SwiftASTContext::GetGenerateDebugInfo() {
return GetIRGenOptions().DebugInfoLevel;
}
swift::PrintOptions SwiftASTContext::GetUserVisibleTypePrintingOptions(
bool print_help_if_available) {
swift::PrintOptions print_options;
print_options.SynthesizeSugarOnTypes = true;
print_options.VarInitializers = true;
print_options.TypeDefinitions = true;
print_options.PrintGetSetOnRWProperties = true;
print_options.SkipImplicit = false;
print_options.PreferTypeRepr = true;
print_options.FunctionDefinitions = true;
print_options.FullyQualifiedTypesIfAmbiguous = true;
print_options.FullyQualifiedTypes = true;
print_options.ExplodePatternBindingDecls = false;
print_options.PrintDocumentationComments =
print_options.PrintRegularClangComments = print_help_if_available;
return print_options;
}
void SwiftASTContext::SetGenerateDebugInfo(swift::IRGenDebugInfoLevel b) {
GetIRGenOptions().DebugInfoLevel = b;
}
llvm::TargetOptions *SwiftASTContext::getTargetOptions() {
  if (!m_target_options_ap)
    m_target_options_ap.reset(new llvm::TargetOptions());
return m_target_options_ap.get();
}
swift::ModuleDecl *SwiftASTContext::GetScratchModule() {
VALID_OR_RETURN(nullptr);
if (m_scratch_module == nullptr)
m_scratch_module = swift::ModuleDecl::create(
GetASTContext()->getIdentifier("__lldb_scratch_module"),
*GetASTContext());
return m_scratch_module;
}
swift::SILModule *SwiftASTContext::GetSILModule() {
VALID_OR_RETURN(nullptr);
  if (!m_sil_module_ap)
m_sil_module_ap = swift::SILModule::createEmptyModule(GetScratchModule(),
GetSILOptions());
return m_sil_module_ap.get();
}
swift::irgen::IRGenerator &
SwiftASTContext::GetIRGenerator(swift::IRGenOptions &opts,
swift::SILModule &module) {
if (m_ir_generator_ap.get() == nullptr) {
m_ir_generator_ap.reset(new swift::irgen::IRGenerator(opts, module));
}
return *m_ir_generator_ap.get();
}
swift::irgen::IRGenModule &SwiftASTContext::GetIRGenModule() {
VALID_OR_RETURN(*m_ir_gen_module_ap);
llvm::call_once(m_ir_gen_module_once, [this]() {
// Make sure we have a good ClangImporter.
GetClangImporter();
swift::IRGenOptions &ir_gen_opts = GetIRGenOptions();
std::string error_str;
llvm::Triple llvm_triple = GetTriple();
const llvm::Target *llvm_target =
llvm::TargetRegistry::lookupTarget(llvm_triple.str(), error_str);
llvm::CodeGenOpt::Level optimization_level = llvm::CodeGenOpt::Level::None;
// Create a target machine.
llvm::TargetMachine *target_machine = llvm_target->createTargetMachine(
llvm_triple.str(),
"generic", // cpu
"", // features
*getTargetOptions(),
llvm::Reloc::Static, // TODO verify with Sean, Default went away
llvm::None, optimization_level);
if (target_machine) {
// Set the module's string representation.
const llvm::DataLayout data_layout = target_machine->createDataLayout();
swift::SILModule *sil_module = GetSILModule();
if (sil_module != nullptr) {
swift::irgen::IRGenerator &ir_generator =
GetIRGenerator(ir_gen_opts, *sil_module);
swift::PrimarySpecificPaths PSPs =
GetCompilerInvocation()
.getFrontendOptions()
.InputsAndOutputs.getPrimarySpecificPathsForAtMostOnePrimary();
std::lock_guard<std::recursive_mutex> global_context_locker(
IRExecutionUnit::GetLLVMGlobalContextMutex());
m_ir_gen_module_ap.reset(new swift::irgen::IRGenModule(
ir_generator, ir_generator.createTargetMachine(), nullptr,
GetGlobalLLVMContext(), ir_gen_opts.ModuleName, PSPs.OutputFilename,
PSPs.MainInputFilenameForDebugInfo));
llvm::Module *llvm_module = m_ir_gen_module_ap->getModule();
llvm_module->setDataLayout(data_layout.getStringRepresentation());
llvm_module->setTargetTriple(llvm_triple.str());
}
}
});
return *m_ir_gen_module_ap;
}
CompilerType
SwiftASTContext::CreateTupleType(const std::vector<CompilerType> &elements) {
VALID_OR_RETURN(CompilerType());
Status error;
  if (elements.empty())
return {GetASTContext()->TheEmptyTupleType};
else {
std::vector<swift::TupleTypeElt> tuple_elems;
for (const CompilerType &type : elements) {
if (auto swift_type = GetSwiftType(type))
tuple_elems.push_back(swift::TupleTypeElt(swift_type));
else
return CompilerType();
}
llvm::ArrayRef<swift::TupleTypeElt> fields(tuple_elems);
return {swift::TupleType::get(fields, *GetASTContext()).getPointer()};
}
}
CompilerType
SwiftASTContext::CreateTupleType(const std::vector<TupleElement> &elements) {
VALID_OR_RETURN(CompilerType());
Status error;
  if (elements.empty())
return {GetASTContext()->TheEmptyTupleType};
else {
std::vector<swift::TupleTypeElt> tuple_elems;
for (const TupleElement &element : elements) {
if (auto swift_type = GetSwiftType(element.element_type)) {
if (element.element_name.IsEmpty())
tuple_elems.push_back(swift::TupleTypeElt(swift_type));
else
tuple_elems.push_back(swift::TupleTypeElt(
swift_type, m_ast_context_ap->getIdentifier(
element.element_name.GetCString())));
} else
return {};
}
llvm::ArrayRef<swift::TupleTypeElt> fields(tuple_elems);
return {swift::TupleType::get(fields, *GetASTContext()).getPointer()};
}
}
CompilerType SwiftASTContext::GetErrorType() {
VALID_OR_RETURN(CompilerType());
swift::ASTContext *swift_ctx = GetASTContext();
if (swift_ctx) {
// Getting the error type requires the Stdlib module be loaded,
// but doesn't cause it to be loaded. Do that here.
swift_ctx->getStdlibModule(true);
swift::NominalTypeDecl *error_type_decl = GetASTContext()->getErrorDecl();
if (error_type_decl) {
auto error_type = error_type_decl->getDeclaredType().getPointer();
return {error_type};
}
}
return {};
}
CompilerType SwiftASTContext::GetNSErrorType(Status &error) {
VALID_OR_RETURN(CompilerType());
return GetTypeFromMangledTypename(
SwiftLanguageRuntime::GetCurrentMangledName("_TtC10Foundation7NSError")
.c_str(),
error);
}
CompilerType SwiftASTContext::CreateMetatypeType(CompilerType instance_type) {
VALID_OR_RETURN(CompilerType());
if (llvm::dyn_cast_or_null<SwiftASTContext>(instance_type.GetTypeSystem()))
return {swift::MetatypeType::get(GetSwiftType(instance_type),
*GetASTContext())};
return {};
}
SwiftASTContext *SwiftASTContext::GetSwiftASTContext(swift::ASTContext *ast) {
SwiftASTContext *swift_ast = GetASTMap().Lookup(ast);
return swift_ast;
}
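/// Return (and cache) the size in bytes of a pointer on the current target.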
uint32_t SwiftASTContext::GetPointerByteSize() {
VALID_OR_RETURN(0);
if (m_pointer_byte_size == 0)
m_pointer_byte_size =
CompilerType(GetASTContext()->TheRawPointerType.getPointer())
.GetByteSize(nullptr)
.getValueOr(0);
return m_pointer_byte_size;
}
uint32_t SwiftASTContext::GetPointerBitAlignment() {
VALID_OR_RETURN(0);
if (m_pointer_bit_align == 0) {
swift::ASTContext *ast = GetASTContext();
m_pointer_bit_align =
CompilerType(ast->TheRawPointerType.getPointer()).GetAlignedBitSize();
}
return m_pointer_bit_align;
}
bool SwiftASTContext::HasErrors() {
if (m_diagnostic_consumer_ap.get())
return (
static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get())
->NumErrors() != 0);
else
return false;
}
bool SwiftASTContext::HasFatalErrors(swift::ASTContext *ast_context) {
return (ast_context && ast_context->Diags.hasFatalErrorOccurred());
}
void SwiftASTContext::ClearDiagnostics() {
assert(!HasFatalErrors() && "Never clear a fatal diagnostic!");
if (m_diagnostic_consumer_ap.get())
static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get())
->Clear();
}
bool SwiftASTContext::SetColorizeDiagnostics(bool b) {
if (m_diagnostic_consumer_ap.get())
return static_cast<StoringDiagnosticConsumer *>(
m_diagnostic_consumer_ap.get())
->SetColorize(b);
return false;
}
void SwiftASTContext::PrintDiagnostics(DiagnosticManager &diagnostic_manager,
uint32_t bufferID, uint32_t first_line,
uint32_t last_line) {
// If this is a fatal error, copy the error into the AST context's
// fatal error field, and then put it to the stream, otherwise just
// dump the diagnostics to the stream.
// N.B. you cannot use VALID_OR_RETURN_VOID here since that exits if
// you have fatal errors, which are what we are trying to print
// here.
if (!m_ast_context_ap.get()) {
SymbolFile *sym_file = GetSymbolFile();
if (sym_file) {
ConstString name =
sym_file->GetObjectFile()->GetModule()->GetObjectName();
m_fatal_errors.SetErrorStringWithFormat("Null context for %s.",
name.AsCString());
} else {
m_fatal_errors.SetErrorString("Unknown fatal error occurred.");
}
return;
}
if (m_ast_context_ap->Diags.hasFatalErrorOccurred() &&
!m_reported_fatal_error) {
DiagnosticManager fatal_diagnostics;
if (m_diagnostic_consumer_ap.get())
static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get())
->PrintDiagnostics(fatal_diagnostics, bufferID, first_line,
last_line);
if (fatal_diagnostics.Diagnostics().size())
m_fatal_errors.SetErrorString(fatal_diagnostics.GetString().data());
else
m_fatal_errors.SetErrorString("Unknown fatal error occurred.");
m_reported_fatal_error = true;
for (const DiagnosticList::value_type &fatal_diagnostic :
fatal_diagnostics.Diagnostics()) {
// FIXME: Need to add a CopyDiagnostic operation for copying
// diagnostics from one manager to another.
diagnostic_manager.AddDiagnostic(
fatal_diagnostic->GetMessage(), fatal_diagnostic->GetSeverity(),
fatal_diagnostic->getKind(), fatal_diagnostic->GetCompilerID());
}
} else {
if (m_diagnostic_consumer_ap.get())
static_cast<StoringDiagnosticConsumer *>(m_diagnostic_consumer_ap.get())
->PrintDiagnostics(diagnostic_manager, bufferID, first_line,
last_line);
}
}
void SwiftASTContext::ModulesDidLoad(ModuleList &module_list) {
ClearModuleDependentCaches();
}
void SwiftASTContext::ClearModuleDependentCaches() {
m_negative_type_cache.Clear();
}
void SwiftASTContext::LogConfiguration() {
VALID_OR_RETURN_VOID();
Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_TYPES));
if (!log)
return;
LOG_PRINTF(LIBLLDB_LOG_TYPES,
"(SwiftASTContext*)%p:", static_cast<void *>(this));
  if (!m_ast_context_ap) {
    log->Printf(" (no AST context)");
    return;
  }
  log->Printf(" Architecture : %s",
              m_ast_context_ap->LangOpts.Target.getTriple().c_str());
log->Printf(" SDK path : %s",
m_ast_context_ap->SearchPathOpts.SDKPath.c_str());
log->Printf(" Runtime resource path : %s",
m_ast_context_ap->SearchPathOpts.RuntimeResourcePath.c_str());
log->Printf(" Runtime library path : %s",
m_ast_context_ap->SearchPathOpts.RuntimeLibraryPath.c_str());
log->Printf(" Runtime library import paths : (%llu items)",
(unsigned long long)m_ast_context_ap->SearchPathOpts
.RuntimeLibraryImportPaths.size());
for (const auto &runtime_import_path :
m_ast_context_ap->SearchPathOpts.RuntimeLibraryImportPaths) {
log->Printf(" %s", runtime_import_path.c_str());
}
log->Printf(" Framework search paths : (%llu items)",
(unsigned long long)
m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths.size());
for (const auto &framework_search_path :
m_ast_context_ap->SearchPathOpts.FrameworkSearchPaths) {
log->Printf(" %s", framework_search_path.Path.c_str());
}
log->Printf(" Import search paths : (%llu items)",
(unsigned long long)
m_ast_context_ap->SearchPathOpts.ImportSearchPaths.size());
for (std::string &import_search_path :
m_ast_context_ap->SearchPathOpts.ImportSearchPaths) {
log->Printf(" %s", import_search_path.c_str());
}
swift::ClangImporterOptions &clang_importer_options =
GetClangImporterOptions();
log->Printf(" Extra clang arguments : (%llu items)",
(unsigned long long)clang_importer_options.ExtraArgs.size());
for (std::string &extra_arg : clang_importer_options.ExtraArgs) {
log->Printf(" %s", extra_arg.c_str());
}
}
bool SwiftASTContext::HasTarget() const {
lldb::TargetWP empty_wp;
// If either call to "std::weak_ptr::owner_before(...) value returns
// true, this indicates that m_section_wp once contained (possibly
// still does) a reference to a valid shared pointer. This helps us
// know if we had a valid reference to a target which is now invalid
// because the target was deleted.
return empty_wp.owner_before(m_target_wp) ||
m_target_wp.owner_before(empty_wp);
}
bool SwiftASTContext::CheckProcessChanged() {
if (HasTarget()) {
TargetSP target_sp(m_target_wp.lock());
if (target_sp) {
Process *process = target_sp->GetProcessSP().get();
if (m_process == NULL) {
if (process)
m_process = process;
} else {
if (m_process != process)
return true;
}
}
}
return false;
}
void SwiftASTContext::AddDebuggerClient(
swift::DebuggerClient *debugger_client) {
m_debugger_clients.push_back(
std::unique_ptr<swift::DebuggerClient>(debugger_client));
}
bool SwiftASTContext::DeclContextIsStructUnionOrClass(void *opaque_decl_ctx) {
return false;
}
ConstString SwiftASTContext::DeclContextGetName(void *opaque_decl_ctx) {
return ConstString();
}
ConstString
SwiftASTContext::DeclContextGetScopeQualifiedName(void *opaque_decl_ctx) {
return ConstString();
}
bool SwiftASTContext::DeclContextIsClassMethod(
void *opaque_decl_ctx, lldb::LanguageType *language_ptr,
bool *is_instance_method_ptr, ConstString *language_object_name_ptr) {
return false;
}
//----------------------------------------------------------------------
// Type queries
//----------------------------------------------------------------------
bool SwiftASTContext::IsArrayType(void *type, CompilerType *element_type_ptr,
uint64_t *size, bool *is_incomplete) {
VALID_OR_RETURN(false);
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::BoundGenericStructType *struct_type =
swift_can_type->getAs<swift::BoundGenericStructType>();
if (struct_type) {
swift::StructDecl *struct_decl = struct_type->getDecl();
if (strcmp(struct_decl->getName().get(), "Array") != 0)
return false;
if (!struct_decl->getModuleContext()->isStdlibModule())
return false;
const llvm::ArrayRef<swift::Type> &args = struct_type->getGenericArgs();
if (args.size() != 1)
return false;
if (is_incomplete)
*is_incomplete = true;
if (size)
*size = 0;
if (element_type_ptr)
*element_type_ptr = CompilerType(args[0].getPointer());
return true;
}
return false;
}
bool SwiftASTContext::IsAggregateType(void *type) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto referent_type = swift_can_type->getReferenceStorageReferent();
return (referent_type->is<swift::TupleType>() ||
referent_type->is<swift::BuiltinVectorType>() ||
referent_type->getAnyNominal());
}
return false;
}
bool SwiftASTContext::IsVectorType(void *type, CompilerType *element_type,
uint64_t *size) {
return false;
}
bool SwiftASTContext::IsRuntimeGeneratedType(void *type) { return false; }
bool SwiftASTContext::IsCharType(void *type) { return false; }
bool SwiftASTContext::IsCompleteType(void *type) { return true; }
bool SwiftASTContext::IsConst(void *type) { return false; }
bool SwiftASTContext::IsCStringType(void *type, uint32_t &length) {
return false;
}
bool SwiftASTContext::IsFunctionType(void *type, bool *is_variadic_ptr) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
return true;
case swift::TypeKind::SILFunction:
return false; // TODO: is this correct?
default:
return false;
}
}
return false;
}
/// Used to detect "Homogeneous Floating-point Aggregates"
uint32_t SwiftASTContext::IsHomogeneousAggregate(void *type,
CompilerType *base_type_ptr) {
return 0;
}
size_t SwiftASTContext::GetNumberOfFunctionArguments(void *type) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto func = swift::dyn_cast_or_null<swift::AnyFunctionType>(swift_can_type);
if (func) {
return func.getParams().size();
}
}
return 0;
}
CompilerType SwiftASTContext::GetFunctionArgumentAtIndex(void *type,
const size_t index) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto func = swift::dyn_cast<swift::AnyFunctionType>(swift_can_type);
if (func) {
auto params = func.getParams();
if (index < params.size()) {
auto param = params[index];
return {this, param.getParameterType().getPointer()};
}
}
}
return {};
}
bool SwiftASTContext::IsFunctionPointerType(void *type) {
return IsFunctionType(type, nullptr); // FIXME: think about this
}
bool SwiftASTContext::IsBlockPointerType(
void *type, CompilerType *function_pointer_type_ptr) {
return false;
}
bool SwiftASTContext::IsIntegerType(void *type, bool &is_signed) {
return (GetTypeInfo(type, nullptr) & eTypeIsInteger);
}
bool SwiftASTContext::IsPointerType(void *type, CompilerType *pointee_type) {
VALID_OR_RETURN(false);
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto referent_type = swift_can_type->getReferenceStorageReferent();
return (referent_type->is<swift::BuiltinRawPointerType>() ||
referent_type->is<swift::BuiltinNativeObjectType>() ||
referent_type->is<swift::BuiltinUnsafeValueBufferType>() ||
referent_type->is<swift::BuiltinUnknownObjectType>() ||
referent_type->is<swift::BuiltinBridgeObjectType>());
}
if (pointee_type)
pointee_type->Clear();
return false;
}
bool SwiftASTContext::IsPointerOrReferenceType(void *type,
CompilerType *pointee_type) {
return IsPointerType(type, pointee_type) ||
IsReferenceType(type, pointee_type, nullptr);
}
bool SwiftASTContext::ShouldTreatScalarValueAsAddress(
lldb::opaque_compiler_type_t type) {
return Flags(GetTypeInfo(type, nullptr))
.AnySet(eTypeInstanceIsPointer | eTypeIsReference);
}
bool SwiftASTContext::IsReferenceType(void *type, CompilerType *pointee_type,
bool *is_rvalue) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::LValue:
if (pointee_type)
*pointee_type = GetNonReferenceType(type);
return true;
default:
break;
}
}
if (pointee_type)
pointee_type->Clear();
return false;
}
bool SwiftASTContext::IsFloatingPointType(void *type, uint32_t &count,
bool &is_complex) {
if (type) {
if (GetTypeInfo(type, nullptr) & eTypeIsFloat) {
count = 1;
is_complex = false;
return true;
}
}
count = 0;
is_complex = false;
return false;
}
bool SwiftASTContext::IsDefined(void *type) {
if (!type)
return false;
return true;
}
bool SwiftASTContext::IsPolymorphicClass(void *type) { return false; }
bool SwiftASTContext::IsPossibleDynamicType(void *type,
CompilerType *dynamic_pointee_type,
bool check_cplusplus,
bool check_objc, bool check_swift) {
VALID_OR_RETURN(false);
if (type && check_swift) {
auto can_type = GetCanonicalSwiftType(type);
if (can_type->getClassOrBoundGenericClass() ||
can_type->isAnyExistentialType())
return true;
if (can_type->hasArchetype() || can_type->hasOpaqueArchetype() ||
can_type->hasTypeParameter())
return true;
if (can_type == GetASTContext()->TheRawPointerType)
return true;
if (can_type == GetASTContext()->TheUnknownObjectType)
return true;
if (can_type == GetASTContext()->TheNativeObjectType)
return true;
if (can_type == GetASTContext()->TheBridgeObjectType)
return true;
}
if (dynamic_pointee_type)
dynamic_pointee_type->Clear();
return false;
}
bool SwiftASTContext::IsScalarType(void *type) {
if (!type)
return false;
return (GetTypeInfo(type, nullptr) & eTypeIsScalar) != 0;
}
bool SwiftASTContext::IsTypedefType(void *type) {
if (!type)
return false;
swift::Type swift_type(GetSwiftType(type));
return swift::isa<swift::TypeAliasType>(swift_type.getPointer());
}
bool SwiftASTContext::IsVoidType(void *type) {
VALID_OR_RETURN(false);
if (!type)
return false;
return type == GetASTContext()->TheEmptyTupleType.getPointer();
}
bool SwiftASTContext::IsGenericType(const CompilerType &compiler_type) {
if (!compiler_type.IsValid())
return false;
if (llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) {
swift::Type swift_type(GetSwiftType(compiler_type));
return swift_type->hasTypeParameter(); // is<swift::ArchetypeType>();
}
return false;
}
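/// Bind the archetypes in \p type using the Swift language runtime and the
/// stack frame of \p exe_scope; returns \p type unchanged if that is not
/// possible. (Descriptive comment added.)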
static CompilerType BindAllArchetypes(CompilerType type,
ExecutionContextScope *exe_scope) {
if (!exe_scope)
return type;
  // Guard against a missing frame or process before asking the runtime to
  // bind the archetypes.
  auto frame_sp = exe_scope->CalculateStackFrame();
  auto process_sp = exe_scope->CalculateProcess();
  if (!frame_sp || !process_sp)
    return type;
  auto *runtime = process_sp->GetSwiftLanguageRuntime();
  if (!runtime)
    return type;
  ExecutionContext exe_ctx;
  exe_scope->CalculateExecutionContext(exe_ctx);
  return runtime->DoArchetypeBindingForType(*frame_sp, type);
}
bool SwiftASTContext::IsErrorType(const CompilerType &compiler_type) {
if (compiler_type.IsValid() &&
llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) {
ProtocolInfo protocol_info;
if (GetProtocolTypeInfo(compiler_type, protocol_info))
return protocol_info.m_is_errortype;
return false;
}
return false;
}
CompilerType
SwiftASTContext::GetReferentType(const CompilerType &compiler_type) {
VALID_OR_RETURN(CompilerType());
if (compiler_type.IsValid() &&
llvm::dyn_cast_or_null<SwiftASTContext>(compiler_type.GetTypeSystem())) {
swift::Type swift_type(GetSwiftType(compiler_type));
swift::TypeBase *swift_typebase = swift_type.getPointer();
if (swift_type && llvm::isa<swift::WeakStorageType>(swift_typebase))
return compiler_type;
auto ref_type = swift_type->getReferenceStorageReferent();
return {ref_type};
}
return {};
}
bool SwiftASTContext::IsFullyRealized(const CompilerType &compiler_type) {
if (!compiler_type.IsValid())
return false;
if (auto ast = llvm::dyn_cast_or_null<SwiftASTContext>(
compiler_type.GetTypeSystem())) {
swift::CanType swift_can_type(GetCanonicalSwiftType(compiler_type));
if (swift::isa<swift::MetatypeType>(swift_can_type))
return true;
return !swift_can_type->hasArchetype() &&
!swift_can_type->hasTypeParameter();
}
return false;
}
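/// Compute the layout of an existential (protocol) type: class/ObjC/error
/// constraints and the number of payload and storage words. (Descriptive
/// comment added.)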
bool SwiftASTContext::GetProtocolTypeInfo(const CompilerType &type,
ProtocolInfo &protocol_info) {
if (auto ast =
llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
if (!swift_can_type.isExistentialType())
return false;
swift::ExistentialLayout layout = swift_can_type.getExistentialLayout();
protocol_info.m_is_class_only = layout.requiresClass();
protocol_info.m_num_protocols = layout.getProtocols().size();
protocol_info.m_is_objc = layout.isObjC();
protocol_info.m_is_anyobject = layout.isAnyObject();
protocol_info.m_is_errortype = layout.isErrorExistential();
if (auto superclass = layout.explicitSuperclass) {
protocol_info.m_superclass = {superclass.getPointer()};
}
unsigned num_witness_tables = 0;
for (auto protoTy : layout.getProtocols()) {
if (!protoTy->getDecl()->isObjC())
num_witness_tables++;
}
if (layout.isErrorExistential()) {
// Error existential -- instance pointer only.
protocol_info.m_num_payload_words = 0;
protocol_info.m_num_storage_words = 1;
} else if (layout.requiresClass()) {
// Class-constrained existential -- instance pointer plus
// witness tables.
protocol_info.m_num_payload_words = 0;
protocol_info.m_num_storage_words = 1 + num_witness_tables;
} else {
// Opaque existential -- three words of inline storage, metadata
// and witness tables.
protocol_info.m_num_payload_words = swift::NumWords_ValueBuffer;
protocol_info.m_num_storage_words =
swift::NumWords_ValueBuffer + 1 + num_witness_tables;
}
return true;
}
return false;
}
SwiftASTContext::TypeAllocationStrategy
SwiftASTContext::GetAllocationStrategy(const CompilerType &type) {
if (auto ast =
llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
const swift::irgen::TypeInfo *type_info =
ast->GetSwiftTypeInfo(type.GetOpaqueQualType());
if (!type_info)
return TypeAllocationStrategy::eUnknown;
switch (type_info->getFixedPacking(ast->GetIRGenModule())) {
case swift::irgen::FixedPacking::OffsetZero:
return TypeAllocationStrategy::eInline;
case swift::irgen::FixedPacking::Allocate:
return TypeAllocationStrategy::ePointer;
case swift::irgen::FixedPacking::Dynamic:
return TypeAllocationStrategy::eDynamic;
}
}
return TypeAllocationStrategy::eUnknown;
}
bool SwiftASTContext::IsBeingDefined(void *type) { return false; }
bool SwiftASTContext::IsObjCObjectPointerType(const CompilerType &type,
CompilerType *class_type_ptr) {
if (!type)
return false;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
if (type_kind == swift::TypeKind::BuiltinNativeObject ||
type_kind == swift::TypeKind::BuiltinUnknownObject)
return true;
if (class_type_ptr)
class_type_ptr->Clear();
return false;
}
//----------------------------------------------------------------------
// Type Completion
//----------------------------------------------------------------------
bool SwiftASTContext::GetCompleteType(void *type) { return true; }
ConstString SwiftASTContext::GetTypeName(void *type) {
std::string type_name;
if (type) {
swift::Type swift_type(GetSwiftType(type));
swift::Type normalized_type =
swift_type.transform([](swift::Type type) -> swift::Type {
if (swift::SyntaxSugarType *syntax_sugar_type =
swift::dyn_cast<swift::SyntaxSugarType>(type.getPointer())) {
return syntax_sugar_type->getSinglyDesugaredType();
}
if (swift::DictionaryType *dictionary_type =
swift::dyn_cast<swift::DictionaryType>(type.getPointer())) {
return dictionary_type->getSinglyDesugaredType();
}
return type;
});
swift::PrintOptions print_options;
print_options.FullyQualifiedTypes = true;
print_options.SynthesizeSugarOnTypes = false;
type_name = normalized_type.getString(print_options);
}
return ConstString(type_name);
}
/// Build a dictionary of Archetype names that appear in \p type.
static llvm::DenseMap<swift::CanType, swift::Identifier>
GetArchetypeNames(swift::Type type, swift::ASTContext &ast_ctx,
const SymbolContext *sc) {
llvm::DenseMap<swift::CanType, swift::Identifier> dict;
swift::Type swift_type(GetSwiftType(type));
assert(&swift_type->getASTContext() == &ast_ctx);
if (!sc)
return dict;
llvm::DenseMap<std::pair<uint64_t, uint64_t>, StringRef> names;
SwiftLanguageRuntime::GetGenericParameterNamesForFunction(*sc, names);
swift_type.visit([&](swift::Type type) {
if (!type->isTypeParameter() || dict.count(type->getCanonicalType()))
return;
auto *param = type->getAs<swift::GenericTypeParamType>();
auto it = names.find({param->getDepth(), param->getIndex()});
if (it != names.end()) {
swift::Identifier ident = ast_ctx.getIdentifier(it->second);
dict.insert({type->getCanonicalType(), ident});
}
});
return dict;
}
ConstString SwiftASTContext::GetDisplayTypeName(void *type,
const SymbolContext *sc) {
VALID_OR_RETURN(ConstString("<invalid Swift context>"));
std::string type_name(GetTypeName(type).AsCString(""));
if (type) {
swift::Type swift_type(GetSwiftType(type));
swift::PrintOptions print_options;
print_options.FullyQualifiedTypes = false;
print_options.SynthesizeSugarOnTypes = true;
print_options.FullyQualifiedTypesIfAmbiguous = true;
auto dict = GetArchetypeNames(swift_type, *GetASTContext(), sc);
print_options.AlternativeTypeNames = &dict;
type_name = swift_type.getString(print_options);
}
return ConstString(type_name);
}
ConstString SwiftASTContext::GetTypeSymbolName(void *type) {
swift::Type swift_type(GetSwiftType(type));
return GetTypeName(swift_type->getWithoutParens().getPointer());
}
ConstString SwiftASTContext::GetMangledTypeName(void *type) {
return GetMangledTypeName(GetSwiftType(type).getPointer());
}
uint32_t
SwiftASTContext::GetTypeInfo(void *type,
CompilerType *pointee_or_element_clang_type) {
VALID_OR_RETURN(0);
if (!type)
return 0;
if (pointee_or_element_clang_type)
pointee_or_element_clang_type->Clear();
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
uint32_t swift_flags = eTypeIsSwift;
switch (type_kind) {
case swift::TypeKind::PrimaryArchetype:
case swift::TypeKind::OpenedArchetype:
case swift::TypeKind::NestedArchetype:
case swift::TypeKind::Error:
case swift::TypeKind::Module:
case swift::TypeKind::TypeVariable:
break;
case swift::TypeKind::UnboundGeneric:
swift_flags |= eTypeIsGeneric;
break;
case swift::TypeKind::GenericFunction:
swift_flags |= eTypeIsGeneric;
LLVM_FALLTHROUGH;
case swift::TypeKind::Function:
swift_flags |= eTypeIsPointer | eTypeHasValue;
break;
case swift::TypeKind::BuiltinInteger:
swift_flags |=
eTypeIsBuiltIn | eTypeHasValue | eTypeIsScalar | eTypeIsInteger;
break;
case swift::TypeKind::BuiltinFloat:
swift_flags |=
eTypeIsBuiltIn | eTypeHasValue | eTypeIsScalar | eTypeIsFloat;
break;
case swift::TypeKind::BuiltinRawPointer:
swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer |
eTypeIsScalar | eTypeHasValue;
break;
case swift::TypeKind::BuiltinNativeObject:
swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer |
eTypeIsScalar | eTypeHasValue;
break;
case swift::TypeKind::BuiltinUnknownObject:
swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer |
eTypeIsScalar | eTypeHasValue | eTypeIsObjC;
break;
case swift::TypeKind::BuiltinBridgeObject:
swift_flags |= eTypeIsBuiltIn | eTypeHasChildren | eTypeIsPointer |
eTypeIsScalar | eTypeHasValue | eTypeIsObjC;
break;
case swift::TypeKind::BuiltinUnsafeValueBuffer:
swift_flags |=
eTypeIsBuiltIn | eTypeIsPointer | eTypeIsScalar | eTypeHasValue;
break;
case swift::TypeKind::BuiltinVector:
// TODO: OR in eTypeIsFloat or eTypeIsInteger as needed
return eTypeIsBuiltIn | eTypeHasChildren | eTypeIsVector;
break;
case swift::TypeKind::Tuple:
swift_flags |= eTypeHasChildren | eTypeIsTuple;
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
swift_flags |= CompilerType(swift_can_type->getReferenceStorageReferent())
.GetTypeInfo(pointee_or_element_clang_type);
break;
case swift::TypeKind::BoundGenericEnum:
swift_flags |= eTypeIsGeneric | eTypeIsBound;
LLVM_FALLTHROUGH;
case swift::TypeKind::Enum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info) {
if (cached_enum_info->GetNumElementsWithPayload() == 0)
swift_flags |= eTypeHasValue | eTypeIsEnumeration;
else
swift_flags |= eTypeHasValue | eTypeIsEnumeration | eTypeHasChildren;
} else
swift_flags |= eTypeIsEnumeration;
} break;
case swift::TypeKind::BoundGenericStruct:
swift_flags |= eTypeIsGeneric | eTypeIsBound;
LLVM_FALLTHROUGH;
case swift::TypeKind::Struct:
swift_flags |= eTypeHasChildren | eTypeIsStructUnion;
break;
case swift::TypeKind::BoundGenericClass:
swift_flags |= eTypeIsGeneric | eTypeIsBound;
LLVM_FALLTHROUGH;
case swift::TypeKind::Class:
swift_flags |= eTypeHasChildren | eTypeIsClass | eTypeHasValue |
eTypeInstanceIsPointer;
break;
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition:
swift_flags |= eTypeHasChildren | eTypeIsStructUnion | eTypeIsProtocol;
break;
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
swift_flags |= eTypeIsMetatype | eTypeHasValue;
break;
case swift::TypeKind::DependentMember:
case swift::TypeKind::GenericTypeParam:
swift_flags |= eTypeHasValue | eTypeIsScalar | eTypeIsPointer |
eTypeIsGenericTypeParam;
break;
case swift::TypeKind::LValue:
if (pointee_or_element_clang_type)
*pointee_or_element_clang_type = GetNonReferenceType(type);
swift_flags |= eTypeHasChildren | eTypeIsReference | eTypeHasValue;
break;
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::TypeAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
case swift::TypeKind::SILToken:
break;
}
return swift_flags;
}
lldb::LanguageType SwiftASTContext::GetMinimumLanguage(void *type) {
if (!type)
return lldb::eLanguageTypeC;
return lldb::eLanguageTypeSwift;
}
lldb::TypeClass SwiftASTContext::GetTypeClass(void *type) {
VALID_OR_RETURN(lldb::eTypeClassInvalid);
if (!type)
return lldb::eTypeClassInvalid;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
return lldb::eTypeClassOther;
case swift::TypeKind::BuiltinInteger:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinFloat:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinRawPointer:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinNativeObject:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinUnsafeValueBuffer:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinUnknownObject:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinBridgeObject:
return lldb::eTypeClassBuiltin;
case swift::TypeKind::BuiltinVector:
return lldb::eTypeClassVector;
case swift::TypeKind::Tuple:
return lldb::eTypeClassArray;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(swift_can_type->getReferenceStorageReferent())
.GetTypeClass();
case swift::TypeKind::GenericTypeParam:
return lldb::eTypeClassOther;
case swift::TypeKind::DependentMember:
return lldb::eTypeClassOther;
case swift::TypeKind::Enum:
return lldb::eTypeClassUnion;
case swift::TypeKind::Struct:
return lldb::eTypeClassStruct;
case swift::TypeKind::Class:
return lldb::eTypeClassClass;
case swift::TypeKind::Protocol:
return lldb::eTypeClassOther;
case swift::TypeKind::Metatype:
return lldb::eTypeClassOther;
case swift::TypeKind::Module:
return lldb::eTypeClassOther;
case swift::TypeKind::PrimaryArchetype:
case swift::TypeKind::OpenedArchetype:
case swift::TypeKind::NestedArchetype:
return lldb::eTypeClassOther;
case swift::TypeKind::Function:
return lldb::eTypeClassFunction;
case swift::TypeKind::GenericFunction:
return lldb::eTypeClassFunction;
case swift::TypeKind::ProtocolComposition:
return lldb::eTypeClassOther;
case swift::TypeKind::LValue:
return lldb::eTypeClassReference;
case swift::TypeKind::UnboundGeneric:
return lldb::eTypeClassOther;
case swift::TypeKind::BoundGenericClass:
return lldb::eTypeClassClass;
case swift::TypeKind::BoundGenericEnum:
return lldb::eTypeClassUnion;
case swift::TypeKind::BoundGenericStruct:
return lldb::eTypeClassStruct;
case swift::TypeKind::TypeVariable:
return lldb::eTypeClassOther;
case swift::TypeKind::ExistentialMetatype:
return lldb::eTypeClassOther;
case swift::TypeKind::DynamicSelf:
return lldb::eTypeClassOther;
case swift::TypeKind::SILBox:
return lldb::eTypeClassOther;
case swift::TypeKind::SILFunction:
return lldb::eTypeClassFunction;
case swift::TypeKind::SILBlockStorage:
return lldb::eTypeClassOther;
case swift::TypeKind::Unresolved:
return lldb::eTypeClassOther;
case swift::TypeKind::Optional:
case swift::TypeKind::TypeAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
case swift::TypeKind::SILToken:
break;
}
return lldb::eTypeClassOther;
}
unsigned SwiftASTContext::GetTypeQualifiers(void *type) { return 0; }
//----------------------------------------------------------------------
// Creating related types
//----------------------------------------------------------------------
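/// Return the element type of the stdlib array-like structs (Array<T>,
/// NativeArray<T>, ArraySlice<T>). Note that the \p stride out-parameter is
/// not populated here.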
CompilerType SwiftASTContext::GetArrayElementType(void *type,
uint64_t *stride) {
VALID_OR_RETURN(CompilerType());
CompilerType element_type;
if (type) {
swift::CanType swift_type(GetCanonicalSwiftType(type));
    // There are a few structs that mean "Array" in Swift:
    // Array<T>
    // NativeArray<T>
    // ArraySlice<T>
    // Treat them all as arrays for convenience's sake.
swift::BoundGenericStructType *boundGenericStructType(
swift_type->getAs<swift::BoundGenericStructType>());
if (boundGenericStructType) {
auto args = boundGenericStructType->getGenericArgs();
swift::StructDecl *decl = boundGenericStructType->getDecl();
if (args.size() == 1 && decl->getModuleContext()->isStdlibModule()) {
const char *declname = decl->getName().get();
if (0 == strcmp(declname, "NativeArray") ||
0 == strcmp(declname, "Array") ||
0 == strcmp(declname, "ArraySlice")) {
assert(GetASTContext() == &args[0].getPointer()->getASTContext());
element_type = CompilerType(args[0].getPointer());
}
}
}
}
return element_type;
}
CompilerType SwiftASTContext::GetCanonicalType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type)
return {GetCanonicalSwiftType(type).getPointer()};
return CompilerType();
}
CompilerType SwiftASTContext::GetInstanceType(void *type) {
VALID_OR_RETURN(CompilerType());
if (!type)
return {};
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
assert((&swift_can_type->getASTContext() == GetASTContext()) &&
"input type belongs to different SwiftASTContext");
auto metatype_type = swift::dyn_cast<swift::AnyMetatypeType>(swift_can_type);
if (metatype_type)
return {metatype_type.getInstanceType().getPointer()};
return {GetSwiftType(type)};
}
CompilerType SwiftASTContext::GetFullyUnqualifiedType(void *type) {
VALID_OR_RETURN(CompilerType());
return {GetSwiftType(type)};
}
int SwiftASTContext::GetFunctionArgumentCount(void *type) {
return GetNumberOfFunctionArguments(type);
}
CompilerType SwiftASTContext::GetFunctionArgumentTypeAtIndex(void *type,
size_t idx) {
return GetFunctionArgumentAtIndex(type, idx);
}
CompilerType SwiftASTContext::GetFunctionReturnType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type) {
auto func =
swift::dyn_cast<swift::AnyFunctionType>(GetCanonicalSwiftType(type));
if (func)
return {func.getResult().getPointer()};
}
return {};
}
size_t SwiftASTContext::GetNumMemberFunctions(void *type) {
size_t num_functions = 0;
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto nominal_decl = swift_can_type.getAnyNominal();
if (nominal_decl) {
auto iter = nominal_decl->getMembers().begin();
auto end = nominal_decl->getMembers().end();
for (; iter != end; iter++) {
switch (iter->getKind()) {
case swift::DeclKind::Constructor:
case swift::DeclKind::Destructor:
case swift::DeclKind::Func:
num_functions += 1;
break;
default:
break;
}
}
}
}
return num_functions;
}
TypeMemberFunctionImpl SwiftASTContext::GetMemberFunctionAtIndex(void *type,
size_t idx) {
VALID_OR_RETURN(TypeMemberFunctionImpl());
std::string name("");
CompilerType result_type;
MemberFunctionKind kind(MemberFunctionKind::eMemberFunctionKindUnknown);
swift::AbstractFunctionDecl *the_decl_we_care_about = nullptr;
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
auto nominal_decl = swift_can_type.getAnyNominal();
if (nominal_decl) {
auto iter = nominal_decl->getMembers().begin();
auto end = nominal_decl->getMembers().end();
for (; iter != end; iter++) {
auto decl_kind = iter->getKind();
switch (decl_kind) {
case swift::DeclKind::Constructor:
case swift::DeclKind::Destructor:
case swift::DeclKind::Func: {
if (idx == 0) {
swift::AbstractFunctionDecl *abstract_func_decl =
llvm::dyn_cast_or_null<swift::AbstractFunctionDecl>(*iter);
if (abstract_func_decl) {
switch (decl_kind) {
case swift::DeclKind::Constructor:
name.clear();
kind = lldb::eMemberFunctionKindConstructor;
the_decl_we_care_about = abstract_func_decl;
break;
case swift::DeclKind::Destructor:
name.clear();
kind = lldb::eMemberFunctionKindDestructor;
the_decl_we_care_about = abstract_func_decl;
break;
case swift::DeclKind::Func:
default: {
swift::FuncDecl *func_decl =
llvm::dyn_cast<swift::FuncDecl>(*iter);
if (func_decl) {
if (func_decl->getName().empty())
name.clear();
else
name.assign(func_decl->getName().get());
if (func_decl->isStatic())
kind = lldb::eMemberFunctionKindStaticMethod;
else
kind = lldb::eMemberFunctionKindInstanceMethod;
the_decl_we_care_about = func_decl;
}
}
}
result_type = CompilerType(
abstract_func_decl->getInterfaceType().getPointer());
}
} else
--idx;
} break;
default:
break;
}
}
}
}
if (type && the_decl_we_care_about && (kind != eMemberFunctionKindUnknown))
return TypeMemberFunctionImpl(
result_type, CompilerDecl(this, the_decl_we_care_about), name, kind);
return TypeMemberFunctionImpl();
}
CompilerType SwiftASTContext::GetLValueReferenceType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type)
return {swift::LValueType::get(GetSwiftType(type))};
return {};
}
CompilerType SwiftASTContext::GetRValueReferenceType(void *type) { return {}; }
CompilerType SwiftASTContext::GetNonReferenceType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::LValueType *lvalue = swift_can_type->getAs<swift::LValueType>();
if (lvalue)
return {lvalue->getObjectType().getPointer()};
}
return {};
}
CompilerType SwiftASTContext::GetPointeeType(void *type) { return {}; }
CompilerType SwiftASTContext::GetPointerType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::Type swift_type(::GetSwiftType(type));
const swift::TypeKind type_kind = swift_type->getKind();
if (type_kind == swift::TypeKind::BuiltinRawPointer)
return {swift_type};
}
return {};
}
CompilerType SwiftASTContext::GetTypedefedType(void *type) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::Type swift_type(::GetSwiftType(type));
swift::TypeAliasType *name_alias_type =
swift::dyn_cast<swift::TypeAliasType>(swift_type.getPointer());
if (name_alias_type) {
return {name_alias_type->getSinglyDesugaredType()};
}
}
return {};
}
CompilerType
SwiftASTContext::GetUnboundType(lldb::opaque_compiler_type_t type) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::BoundGenericType *bound_generic_type =
swift_can_type->getAs<swift::BoundGenericType>();
if (bound_generic_type) {
swift::NominalTypeDecl *nominal_type_decl = bound_generic_type->getDecl();
if (nominal_type_decl)
return {nominal_type_decl->getDeclaredType()};
}
}
return {GetSwiftType(type)};
}
//----------------------------------------------------------------------
// Create related types using the current type's AST
//----------------------------------------------------------------------
CompilerType SwiftASTContext::GetBasicTypeFromAST(lldb::BasicType basic_type) {
return {};
}
//----------------------------------------------------------------------
// Exploring the type
//----------------------------------------------------------------------
const swift::irgen::TypeInfo *SwiftASTContext::GetSwiftTypeInfo(void *type) {
VALID_OR_RETURN(nullptr);
if (type) {
auto &irgen_module = GetIRGenModule();
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::SILType swift_sil_type = irgen_module.getLoweredType(swift_can_type);
return &irgen_module.getTypeInfo(swift_sil_type);
}
return nullptr;
}
const swift::irgen::FixedTypeInfo *
SwiftASTContext::GetSwiftFixedTypeInfo(void *type) {
VALID_OR_RETURN(nullptr);
const swift::irgen::TypeInfo *type_info = GetSwiftTypeInfo(type);
if (type_info) {
if (type_info->isFixedSize())
return swift::cast<const swift::irgen::FixedTypeInfo>(type_info);
}
return nullptr;
}
bool SwiftASTContext::IsFixedSize(CompilerType compiler_type) {
VALID_OR_RETURN(false);
const swift::irgen::FixedTypeInfo *type_info =
GetSwiftFixedTypeInfo(compiler_type.GetOpaqueQualType());
if (type_info)
return type_info->isFixedSize();
return false;
}
llvm::Optional<uint64_t>
SwiftASTContext::GetBitSize(lldb::opaque_compiler_type_t type,
ExecutionContextScope *exe_scope) {
if (!type)
return {};
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
if (swift_can_type->hasTypeParameter()) {
if (!exe_scope)
return {};
ExecutionContext exe_ctx;
exe_scope->CalculateExecutionContext(exe_ctx);
auto swift_scratch_ctx_lock = SwiftASTContextLock(&exe_ctx);
CompilerType bound_type = BindAllArchetypes({this, type}, exe_scope);
    // Note that the bound type may be in a different AST context.
return bound_type.GetBitSize(nullptr).getValueOr(0);
}
// lldb ValueObject subsystem expects functions to be a single
// pointer in size to print them correctly. This is not true
// for swift (where functions aren't necessarily a single pointer
// in size), so we need to work around the limitation here.
if (swift_can_type->getKind() == swift::TypeKind::Function)
return GetPointerByteSize() * 8;
const swift::irgen::FixedTypeInfo *fixed_type_info =
GetSwiftFixedTypeInfo(type);
if (fixed_type_info)
return fixed_type_info->getFixedSize().getValue() * 8;
if (!exe_scope)
return {};
  if (auto process_sp = exe_scope->CalculateProcess())
    if (auto *runtime = process_sp->GetSwiftLanguageRuntime())
      return runtime->GetBitSize({this, type});
// FIXME: This should be {}.
return 0;
}
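// GetBitSize() may need an ExecutionContextScope so type parameters can be
// bound via the language runtime. A minimal caller-side sketch (the variable
// names below are hypothetical, not part of this file):
//
//   if (llvm::Optional<uint64_t> bit_size =
//           swift_ast_ctx->GetBitSize(opaque_type, exe_scope))
//     uint64_t byte_size = (*bit_size + 7) / 8;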
uint64_t SwiftASTContext::GetByteStride(lldb::opaque_compiler_type_t type) {
if (type) {
const swift::irgen::FixedTypeInfo *fixed_type_info =
GetSwiftFixedTypeInfo(type);
if (fixed_type_info)
return fixed_type_info->getFixedStride().getValue();
}
return 0;
}
size_t SwiftASTContext::GetTypeBitAlign(void *type) {
if (type) {
const swift::irgen::FixedTypeInfo *fixed_type_info =
GetSwiftFixedTypeInfo(type);
if (fixed_type_info)
return fixed_type_info->getFixedAlignment().getValue();
}
return 0;
}
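/// Map \p type onto one of LLDB's value encodings. On success \p count is set
/// to 1; if no encoding applies, \p count is set to 0 and eEncodingInvalid is
/// returned.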
lldb::Encoding SwiftASTContext::GetEncoding(void *type, uint64_t &count) {
VALID_OR_RETURN(lldb::eEncodingInvalid);
if (!type)
return lldb::eEncodingInvalid;
count = 1;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
break;
case swift::TypeKind::BuiltinInteger:
return lldb::eEncodingSint; // TODO: detect if an integer is unsigned
case swift::TypeKind::BuiltinFloat:
    return lldb::eEncodingIEEE754;
case swift::TypeKind::PrimaryArchetype:
case swift::TypeKind::OpenedArchetype:
case swift::TypeKind::NestedArchetype:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::Class: // Classes are pointers in swift...
case swift::TypeKind::BoundGenericClass:
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
return lldb::eEncodingUint;
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::Tuple:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(swift_can_type->getReferenceStorageReferent())
.GetEncoding(count);
break;
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
return lldb::eEncodingUint;
case swift::TypeKind::GenericFunction:
case swift::TypeKind::Function:
return lldb::eEncodingUint;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum:
break;
case swift::TypeKind::Struct:
case swift::TypeKind::Protocol:
case swift::TypeKind::Module:
case swift::TypeKind::ProtocolComposition:
break;
case swift::TypeKind::LValue:
return lldb::eEncodingUint;
case swift::TypeKind::UnboundGeneric:
case swift::TypeKind::BoundGenericStruct:
case swift::TypeKind::TypeVariable:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::TypeAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
case swift::TypeKind::SILToken:
break;
}
count = 0;
return lldb::eEncodingInvalid;
}
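/// Pick a default display format for \p type, falling back to eFormatBytes
/// for types with no natural textual presentation.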
lldb::Format SwiftASTContext::GetFormat(void *type) {
VALID_OR_RETURN(lldb::eFormatInvalid);
if (!type)
return lldb::eFormatDefault;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
break;
case swift::TypeKind::BuiltinInteger:
return eFormatDecimal; // TODO: detect if an integer is unsigned
case swift::TypeKind::BuiltinFloat:
    return eFormatFloat;
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::PrimaryArchetype:
case swift::TypeKind::OpenedArchetype:
case swift::TypeKind::NestedArchetype:
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
return eFormatAddressInfo;
// Classes are always pointers in swift.
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass:
return eFormatHex;
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::Tuple:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(swift_can_type->getReferenceStorageReferent())
.GetFormat();
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum:
return eFormatUnsigned;
case swift::TypeKind::GenericFunction:
case swift::TypeKind::Function:
return lldb::eFormatAddressInfo;
case swift::TypeKind::Struct:
case swift::TypeKind::Protocol:
case swift::TypeKind::Metatype:
case swift::TypeKind::Module:
case swift::TypeKind::ProtocolComposition:
break;
case swift::TypeKind::LValue:
return lldb::eFormatHex;
case swift::TypeKind::UnboundGeneric:
case swift::TypeKind::BoundGenericStruct:
case swift::TypeKind::TypeVariable:
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::TypeAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
case swift::TypeKind::SILToken:
break;
}
  // We don't know how to display this type.
return lldb::eFormatBytes;
}
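/// Count the logical children of \p type: payload cases for enums, stored
/// fields (plus an optional superclass child) for classes and structs, and
/// storage words for protocol/existential types.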
uint32_t SwiftASTContext::GetNumChildren(void *type,
bool omit_empty_base_classes,
const ExecutionContext *exe_ctx) {
VALID_OR_RETURN(0);
if (!type)
return 0;
uint32_t num_children = 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::BuiltinVector:
case swift::TypeKind::Module:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(swift_can_type->getReferenceStorageReferent())
.GetNumChildren(omit_empty_base_classes, exe_ctx);
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info)
return cached_enum_info->GetNumElementsWithPayload();
} break;
case swift::TypeKind::Tuple:
case swift::TypeKind::Struct:
case swift::TypeKind::BoundGenericStruct:
return GetNumFields(type);
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass: {
auto class_decl = swift_can_type->getClassOrBoundGenericClass();
return (class_decl->hasSuperclass() ? 1 : 0) + GetNumFields(type);
}
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition: {
ProtocolInfo protocol_info;
if (!GetProtocolTypeInfo(CompilerType(GetSwiftType(type)), protocol_info))
break;
return protocol_info.m_num_storage_words;
}
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
case swift::TypeKind::PrimaryArchetype:
case swift::TypeKind::OpenedArchetype:
case swift::TypeKind::NestedArchetype:
return 0;
case swift::TypeKind::LValue: {
swift::LValueType *lvalue_type =
swift_can_type->castTo<swift::LValueType>();
swift::TypeBase *deref_type = lvalue_type->getObjectType().getPointer();
uint32_t num_pointee_children =
CompilerType(deref_type)
.GetNumChildren(omit_empty_base_classes, exe_ctx);
// If this type points to a simple type (or to a class), then it
// has 1 child.
if (num_pointee_children == 0 || deref_type->getClassOrBoundGenericClass())
num_children = 1;
else
num_children = num_pointee_children;
} break;
case swift::TypeKind::UnboundGeneric:
break;
case swift::TypeKind::TypeVariable:
break;
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::TypeAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
case swift::TypeKind::SILToken:
break;
}
return num_children;
}
lldb::BasicType SwiftASTContext::GetBasicTypeEnumeration(void *type) {
return eBasicTypeInvalid;
}
#pragma mark Aggregate Types
uint32_t SwiftASTContext::GetNumDirectBaseClasses(void *opaque_type) {
if (!opaque_type)
return 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(opaque_type));
swift::ClassDecl *class_decl = swift_can_type->getClassOrBoundGenericClass();
if (class_decl) {
if (class_decl->hasSuperclass())
return 1;
}
return 0;
}
uint32_t SwiftASTContext::GetNumVirtualBaseClasses(void *opaque_type) {
return 0;
}
uint32_t SwiftASTContext::GetNumFields(void *type) {
VALID_OR_RETURN(0);
if (!type)
return 0;
uint32_t count = 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(swift_can_type->getReferenceStorageReferent())
.GetNumFields();
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info)
return cached_enum_info->GetNumElementsWithPayload();
} break;
case swift::TypeKind::Tuple:
return cast<swift::TupleType>(swift_can_type)->getNumElements();
case swift::TypeKind::Struct:
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass:
case swift::TypeKind::BoundGenericStruct: {
auto nominal = swift_can_type->getAnyNominal();
return GetStoredProperties(nominal).size();
}
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition:
return GetNumChildren(type, /*omit_empty_base_classes=*/false, nullptr);
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
return 0;
case swift::TypeKind::Module:
case swift::TypeKind::PrimaryArchetype:
case swift::TypeKind::OpenedArchetype:
case swift::TypeKind::NestedArchetype:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
case swift::TypeKind::LValue:
case swift::TypeKind::UnboundGeneric:
case swift::TypeKind::TypeVariable:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::TypeAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
case swift::TypeKind::SILToken:
break;
}
return count;
}
CompilerType
SwiftASTContext::GetDirectBaseClassAtIndex(void *opaque_type, size_t idx,
uint32_t *bit_offset_ptr) {
VALID_OR_RETURN(CompilerType());
if (opaque_type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(opaque_type));
swift::ClassDecl *class_decl =
swift_can_type->getClassOrBoundGenericClass();
if (class_decl) {
swift::Type base_class_type = class_decl->getSuperclass();
if (base_class_type)
return {base_class_type.getPointer()};
}
}
return {};
}
CompilerType
SwiftASTContext::GetVirtualBaseClassAtIndex(void *opaque_type, size_t idx,
uint32_t *bit_offset_ptr) {
return {};
}
/// Retrieve the printable name of a tuple element.
static std::string GetTupleElementName(const swift::TupleType *tuple_type,
unsigned index,
llvm::StringRef printed_index = "") {
const auto &element = tuple_type->getElement(index);
// Use the element name if there is one.
if (!element.getName().empty())
return element.getName().str();
// If we know the printed index already, use that.
if (!printed_index.empty())
return printed_index;
// Print the index and return that.
std::string str;
llvm::raw_string_ostream(str) << index;
return str;
}
/// Retrieve the printable name of a type referenced as a superclass.
static std::string GetSuperclassName(const CompilerType &superclass_type) {
return superclass_type.GetUnboundType().GetTypeName().AsCString(
"<no type name>");
}
/// Retrieve the type and name of a child of an existential type.
static std::pair<CompilerType, std::string>
GetExistentialTypeChild(swift::ASTContext *swift_ast_ctx, CompilerType type,
const SwiftASTContext::ProtocolInfo &protocol_info,
unsigned idx) {
assert(idx < protocol_info.m_num_storage_words &&
"caller is responsible for validating index");
// A payload word for a non-class, non-error existential.
if (idx < protocol_info.m_num_payload_words) {
std::string name;
llvm::raw_string_ostream(name) << "payload_data_" << idx;
auto raw_pointer = swift_ast_ctx->TheRawPointerType;
return {CompilerType(raw_pointer.getPointer()), std::move(name)};
}
// The instance for a class-bound existential.
if (idx == 0 && protocol_info.m_is_class_only) {
CompilerType class_type;
if (protocol_info.m_superclass) {
class_type = protocol_info.m_superclass;
} else {
auto raw_pointer = swift_ast_ctx->TheRawPointerType;
class_type = CompilerType(raw_pointer.getPointer());
}
return {class_type, "instance"};
}
// The instance for an error existential.
if (idx == 0 && protocol_info.m_is_errortype) {
auto raw_pointer = swift_ast_ctx->TheRawPointerType;
return {CompilerType(raw_pointer.getPointer()), "error_instance"};
}
// The metatype for a non-class, non-error existential.
if (idx && idx == protocol_info.m_num_payload_words) {
auto any_metatype =
swift::ExistentialMetatypeType::get(swift_ast_ctx->TheAnyType);
return {CompilerType(any_metatype), "instance_type"};
}
// A witness table. Figure out which protocol it corresponds to.
unsigned witness_table_idx = idx - protocol_info.m_num_payload_words - 1;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::ExistentialLayout layout = swift_can_type.getExistentialLayout();
std::string name;
for (auto protoType : layout.getProtocols()) {
auto proto = protoType->getDecl();
if (proto->isObjC())
continue;
if (witness_table_idx == 0) {
llvm::raw_string_ostream(name)
<< "witness_table_" << proto->getBaseName().userFacingName();
break;
}
--witness_table_idx;
}
auto raw_pointer = swift_ast_ctx->TheRawPointerType;
return {CompilerType(raw_pointer.getPointer()), std::move(name)};
}
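// Layout recap for GetExistentialTypeChild(): plain (non-class, non-error)
// existentials are made up of the payload words, then the metatype word, then
// one witness table pointer per non-ObjC protocol; class-bound and error
// existentials start with the instance pointer instead.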
CompilerType SwiftASTContext::GetFieldAtIndex(void *type, size_t idx,
std::string &name,
uint64_t *bit_offset_ptr,
uint32_t *bitfield_bit_size_ptr,
bool *is_bitfield_ptr) {
VALID_OR_RETURN(CompilerType());
if (!type)
return {};
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(swift_can_type->getReferenceStorageReferent())
.GetFieldAtIndex(idx, name, bit_offset_ptr, bitfield_bit_size_ptr,
is_bitfield_ptr);
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info &&
idx < cached_enum_info->GetNumElementsWithPayload()) {
const SwiftEnumDescriptor::ElementInfo *enum_element_info =
cached_enum_info->GetElementWithPayloadAtIndex(idx);
name.assign(enum_element_info->name.GetCString());
if (bit_offset_ptr)
*bit_offset_ptr = 0;
if (bitfield_bit_size_ptr)
*bitfield_bit_size_ptr = 0;
if (is_bitfield_ptr)
*is_bitfield_ptr = false;
return enum_element_info->payload_type;
}
} break;
case swift::TypeKind::Tuple: {
auto tuple_type = cast<swift::TupleType>(swift_can_type);
if (idx >= tuple_type->getNumElements())
break;
// We cannot reliably get layout information without an execution
// context.
if (bit_offset_ptr)
*bit_offset_ptr = LLDB_INVALID_IVAR_OFFSET;
if (bitfield_bit_size_ptr)
*bitfield_bit_size_ptr = 0;
if (is_bitfield_ptr)
*is_bitfield_ptr = false;
name = GetTupleElementName(tuple_type, idx);
const auto &child = tuple_type->getElement(idx);
return CompilerType(child.getType().getPointer());
}
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass: {
auto class_decl = swift_can_type->getClassOrBoundGenericClass();
if (class_decl->hasSuperclass()) {
if (idx == 0) {
swift::Type superclass_swift_type = swift_can_type->getSuperclass();
CompilerType superclass_type(superclass_swift_type.getPointer());
name = GetSuperclassName(superclass_type);
// We cannot reliably get layout information without an
// execution context.
if (bit_offset_ptr)
*bit_offset_ptr = LLDB_INVALID_IVAR_OFFSET;
if (bitfield_bit_size_ptr)
*bitfield_bit_size_ptr = 0;
if (is_bitfield_ptr)
*is_bitfield_ptr = false;
return superclass_type;
}
// Adjust the index to refer into the stored properties.
--idx;
}
LLVM_FALLTHROUGH;
}
case swift::TypeKind::Struct:
case swift::TypeKind::BoundGenericStruct: {
auto nominal = swift_can_type->getAnyNominal();
auto stored_properties = GetStoredProperties(nominal);
if (idx >= stored_properties.size())
break;
auto property = stored_properties[idx];
name = property->getBaseName().userFacingName();
// We cannot reliably get layout information without an execution
// context.
if (bit_offset_ptr)
*bit_offset_ptr = LLDB_INVALID_IVAR_OFFSET;
if (bitfield_bit_size_ptr)
*bitfield_bit_size_ptr = 0;
if (is_bitfield_ptr)
*is_bitfield_ptr = false;
swift::Type child_swift_type = swift_can_type->getTypeOfMember(
nominal->getModuleContext(), property, nullptr);
return CompilerType(child_swift_type.getPointer());
}
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition: {
ProtocolInfo protocol_info;
if (!GetProtocolTypeInfo(CompilerType(GetSwiftType(type)), protocol_info))
break;
if (idx >= protocol_info.m_num_storage_words)
break;
CompilerType compiler_type(GetSwiftType(type));
CompilerType child_type;
std::tie(child_type, name) = GetExistentialTypeChild(
GetASTContext(), compiler_type, protocol_info, idx);
llvm::Optional<uint64_t> child_size = child_type.GetByteSize(nullptr);
if (!child_size)
return {};
if (bit_offset_ptr)
*bit_offset_ptr = idx * *child_size * 8;
if (bitfield_bit_size_ptr)
*bitfield_bit_size_ptr = 0;
if (is_bitfield_ptr)
*is_bitfield_ptr = false;
return child_type;
}
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
break;
case swift::TypeKind::Module:
case swift::TypeKind::PrimaryArchetype:
case swift::TypeKind::OpenedArchetype:
case swift::TypeKind::NestedArchetype:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
case swift::TypeKind::LValue:
case swift::TypeKind::UnboundGeneric:
case swift::TypeKind::TypeVariable:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::TypeAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
case swift::TypeKind::SILToken:
break;
}
return CompilerType();
}
// If a pointer to a pointee type (the clang_type arg) says that it
// has no children, then we either need to trust it, or override it
// and return a different result. For example, an "int *" has one
// child that is an integer, but a function pointer doesn't have any
// children. Likewise if a Record type claims it has no children, then
// there really is nothing to show.
uint32_t SwiftASTContext::GetNumPointeeChildren(void *type) {
if (!type)
return 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
return 0;
case swift::TypeKind::BuiltinInteger:
return 1;
case swift::TypeKind::BuiltinFloat:
return 1;
case swift::TypeKind::BuiltinRawPointer:
return 1;
case swift::TypeKind::BuiltinUnsafeValueBuffer:
return 1;
case swift::TypeKind::BuiltinNativeObject:
return 1;
case swift::TypeKind::BuiltinUnknownObject:
return 1;
case swift::TypeKind::BuiltinBridgeObject:
return 1;
case swift::TypeKind::BuiltinVector:
return 0;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return GetNumPointeeChildren(
swift::cast<swift::ReferenceStorageType>(swift_can_type).getPointer());
case swift::TypeKind::Tuple:
return 0;
case swift::TypeKind::GenericTypeParam:
return 0;
case swift::TypeKind::DependentMember:
return 0;
case swift::TypeKind::Enum:
return 0;
case swift::TypeKind::Struct:
return 0;
case swift::TypeKind::Class:
return 0;
case swift::TypeKind::Protocol:
return 0;
case swift::TypeKind::Metatype:
return 0;
case swift::TypeKind::Module:
return 0;
case swift::TypeKind::PrimaryArchetype:
case swift::TypeKind::OpenedArchetype:
case swift::TypeKind::NestedArchetype:
return 0;
case swift::TypeKind::Function:
return 0;
case swift::TypeKind::GenericFunction:
return 0;
case swift::TypeKind::ProtocolComposition:
return 0;
case swift::TypeKind::LValue:
return 1;
case swift::TypeKind::UnboundGeneric:
return 0;
case swift::TypeKind::BoundGenericClass:
return 0;
case swift::TypeKind::BoundGenericEnum:
return 0;
case swift::TypeKind::BoundGenericStruct:
return 0;
case swift::TypeKind::TypeVariable:
return 0;
case swift::TypeKind::ExistentialMetatype:
return 0;
case swift::TypeKind::DynamicSelf:
return 0;
case swift::TypeKind::SILBox:
return 0;
case swift::TypeKind::SILFunction:
return 0;
case swift::TypeKind::SILBlockStorage:
return 0;
case swift::TypeKind::Unresolved:
return 0;
case swift::TypeKind::Optional:
case swift::TypeKind::TypeAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
case swift::TypeKind::SILToken:
break;
}
return 0;
}
static llvm::Optional<uint64_t> GetInstanceVariableOffset_Metadata(
ValueObject *valobj, ExecutionContext *exe_ctx, const CompilerType &type,
StringRef ivar_name, const CompilerType &ivar_type) {
llvm::SmallString<1> m_description;
LOG_PRINTF(LIBLLDB_LOG_TYPES, "ivar_name = %s, type = %s",
ivar_name.str().c_str(), type.GetTypeName().AsCString());
Process *process = exe_ctx->GetProcessPtr();
if (!process) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "no process");
return {};
}
SwiftLanguageRuntime *runtime = process->GetSwiftLanguageRuntime();
if (!runtime) {
LOG_PRINTF(LIBLLDB_LOG_TYPES, "no runtime");
return {};
}
Status error;
llvm::Optional<uint64_t> offset = runtime->GetMemberVariableOffset(
type, valobj, ConstString(ivar_name), &error);
if (offset)
LOG_PRINTF(LIBLLDB_LOG_TYPES, "for %s: %lu", ivar_name.str().c_str(),
*offset);
else
LOG_PRINTF(LIBLLDB_LOG_TYPES, "resolver failure: %s", error.AsCString());
return offset;
}
static llvm::Optional<uint64_t>
GetInstanceVariableOffset(ValueObject *valobj, ExecutionContext *exe_ctx,
const CompilerType &class_type, StringRef ivar_name,
const CompilerType &ivar_type) {
if (ivar_name.empty())
return {};
if (!exe_ctx)
return {};
Target *target = exe_ctx->GetTargetPtr();
if (!target)
return {};
return GetInstanceVariableOffset_Metadata(valobj, exe_ctx, class_type,
ivar_name, ivar_type);
}
bool SwiftASTContext::IsNonTriviallyManagedReferenceType(
const CompilerType &type, NonTriviallyManagedReferenceStrategy &strategy,
CompilerType *underlying_type) {
if (auto ast =
llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
default:
break;
case swift::TypeKind::UnmanagedStorage: {
strategy = NonTriviallyManagedReferenceStrategy::eUnmanaged;
if (underlying_type)
*underlying_type = CompilerType(
swift_can_type->getReferenceStorageReferent().getPointer());
}
return true;
case swift::TypeKind::UnownedStorage: {
strategy = NonTriviallyManagedReferenceStrategy::eUnowned;
if (underlying_type)
*underlying_type = CompilerType(
swift_can_type->getReferenceStorageReferent().getPointer());
}
return true;
case swift::TypeKind::WeakStorage: {
strategy = NonTriviallyManagedReferenceStrategy::eWeak;
if (underlying_type)
*underlying_type = CompilerType(
swift_can_type->getReferenceStorageReferent().getPointer());
}
return true;
}
}
return false;
}
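/// Resolve child \p idx of \p type and fill in the child_* out-parameters.
/// Enum children are the payload cases, class children start with the
/// superclass (if any) followed by the stored properties, and existential
/// children are the underlying storage words.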
CompilerType SwiftASTContext::GetChildCompilerTypeAtIndex(
void *type, ExecutionContext *exe_ctx, size_t idx,
bool transparent_pointers, bool omit_empty_base_classes,
bool ignore_array_bounds, std::string &child_name,
uint32_t &child_byte_size, int32_t &child_byte_offset,
uint32_t &child_bitfield_bit_size, uint32_t &child_bitfield_bit_offset,
bool &child_is_base_class, bool &child_is_deref_of_parent,
ValueObject *valobj, uint64_t &language_flags) {
VALID_OR_RETURN(CompilerType());
if (!type)
return CompilerType();
auto get_type_size = [&exe_ctx](uint32_t &result, CompilerType type) {
auto *exe_scope =
exe_ctx ? exe_ctx->GetBestExecutionContextScope() : nullptr;
llvm::Optional<uint64_t> size = type.GetByteSize(exe_scope);
if (!size)
return false;
result = *size;
return true;
};
language_flags = 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
assert(&swift_can_type->getASTContext() == GetASTContext());
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(swift_can_type->getReferenceStorageReferent())
.GetChildCompilerTypeAtIndex(
exe_ctx, idx, transparent_pointers, omit_empty_base_classes,
ignore_array_bounds, child_name, child_byte_size, child_byte_offset,
child_bitfield_bit_size, child_bitfield_bit_offset,
child_is_base_class, child_is_deref_of_parent, valobj,
language_flags);
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info &&
idx < cached_enum_info->GetNumElementsWithPayload()) {
const SwiftEnumDescriptor::ElementInfo *element_info =
cached_enum_info->GetElementWithPayloadAtIndex(idx);
child_name.assign(element_info->name.GetCString());
if (!get_type_size(child_byte_size, element_info->payload_type))
return {};
child_byte_offset = 0;
child_bitfield_bit_size = 0;
child_bitfield_bit_offset = 0;
child_is_base_class = false;
child_is_deref_of_parent = false;
if (element_info->is_indirect) {
language_flags |= LanguageFlags::eIsIndirectEnumCase;
return CompilerType(GetASTContext()->TheRawPointerType.getPointer());
} else
return element_info->payload_type;
}
} break;
case swift::TypeKind::Tuple: {
auto tuple_type = cast<swift::TupleType>(swift_can_type);
if (idx >= tuple_type->getNumElements())
break;
const auto &child = tuple_type->getElement(idx);
// Format the integer.
llvm::SmallString<16> printed_idx;
llvm::raw_svector_ostream(printed_idx) << idx;
child_name = GetTupleElementName(tuple_type, idx, printed_idx);
CompilerType child_type(child.getType().getPointer());
if (!get_type_size(child_byte_size, child_type))
return {};
child_is_base_class = false;
child_is_deref_of_parent = false;
CompilerType compiler_type(GetSwiftType(type));
llvm::Optional<uint64_t> offset = GetInstanceVariableOffset(
valobj, exe_ctx, compiler_type, printed_idx.c_str(), child_type);
if (!offset)
return {};
child_byte_offset = *offset;
child_bitfield_bit_size = 0;
child_bitfield_bit_offset = 0;
return child_type;
}
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass: {
auto class_decl = swift_can_type->getClassOrBoundGenericClass();
// Child 0 is the superclass, if there is one.
if (class_decl->hasSuperclass()) {
if (idx == 0) {
swift::Type superclass_swift_type = swift_can_type->getSuperclass();
CompilerType superclass_type(superclass_swift_type.getPointer());
child_name = GetSuperclassName(superclass_type);
if (!get_type_size(child_byte_size, superclass_type))
return {};
child_is_base_class = true;
child_is_deref_of_parent = false;
child_byte_offset = 0;
child_bitfield_bit_size = 0;
child_bitfield_bit_offset = 0;
language_flags |= LanguageFlags::eIgnoreInstancePointerness;
return superclass_type;
}
// Adjust the index to refer into the stored properties.
--idx;
}
LLVM_FALLTHROUGH;
}
case swift::TypeKind::Struct:
case swift::TypeKind::BoundGenericStruct: {
auto nominal = swift_can_type->getAnyNominal();
auto stored_properties = GetStoredProperties(nominal);
if (idx >= stored_properties.size())
break;
// Find the stored property with this index.
auto property = stored_properties[idx];
swift::Type child_swift_type = swift_can_type->getTypeOfMember(
nominal->getModuleContext(), property, nullptr);
CompilerType child_type(child_swift_type.getPointer());
child_name = property->getBaseName().userFacingName();
if (!get_type_size(child_byte_size, child_type))
return {};
child_is_base_class = false;
child_is_deref_of_parent = false;
CompilerType compiler_type(GetSwiftType(type));
llvm::Optional<uint64_t> offset = GetInstanceVariableOffset(
valobj, exe_ctx, compiler_type, child_name.c_str(), child_type);
if (!offset)
return {};
child_byte_offset = *offset;
child_bitfield_bit_size = 0;
child_bitfield_bit_offset = 0;
return child_type;
}
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition: {
ProtocolInfo protocol_info;
if (!GetProtocolTypeInfo(CompilerType(GetSwiftType(type)), protocol_info))
break;
if (idx >= protocol_info.m_num_storage_words)
break;
CompilerType compiler_type(GetSwiftType(type));
CompilerType child_type;
std::tie(child_type, child_name) = GetExistentialTypeChild(
GetASTContext(), compiler_type, protocol_info, idx);
if (!get_type_size(child_byte_size, child_type))
return {};
child_byte_offset = idx * child_byte_size;
child_bitfield_bit_size = 0;
child_bitfield_bit_offset = 0;
child_is_base_class = false;
child_is_deref_of_parent = false;
return child_type;
}
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
break;
case swift::TypeKind::Module:
case swift::TypeKind::PrimaryArchetype:
case swift::TypeKind::OpenedArchetype:
case swift::TypeKind::NestedArchetype:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
break;
case swift::TypeKind::LValue:
if (idx < GetNumChildren(type, omit_empty_base_classes, exe_ctx)) {
CompilerType pointee_clang_type(GetNonReferenceType(type));
Flags pointee_clang_type_flags(pointee_clang_type.GetTypeInfo());
      const char *parent_name =
          valobj ? valobj->GetName().GetCString() : nullptr;
if (parent_name) {
child_name.assign(1, '&');
child_name += parent_name;
}
// We have a pointer to a simple type
if (idx == 0) {
if (!get_type_size(child_byte_size, pointee_clang_type))
return {};
child_byte_offset = 0;
return pointee_clang_type;
}
}
break;
case swift::TypeKind::UnboundGeneric:
break;
case swift::TypeKind::TypeVariable:
break;
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::TypeAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
case swift::TypeKind::SILToken:
break;
}
return CompilerType();
}
// Look for a child member (doesn't include base classes, but it does
// include their members) in the type hierarchy. Returns an index path
// into "clang_type" on how to reach the appropriate member.
//
// class A
// {
// public:
// int m_a;
// int m_b;
// };
//
// class B
// {
// };
//
// class C :
// public B,
// public A
// {
// };
//
// If we have a clang type that describes "class C", and we wanted to
// look for "m_b" in it:
//
// With omit_empty_base_classes == false we would get an integer array back
// with:
// { 1, 1 }
// The first index 1 is the child index for "class A" within class C.
// The second index 1 is the child index for "m_b" within class A.
//
// With omit_empty_base_classes == true we would get an integer array back with:
// { 0, 1 }
// The first index 0 is the child index for "class A" within class C
// (since class B doesn't have any members it doesn't count). The
// second index 1 is the child index for "m_b" within class A.
size_t SwiftASTContext::GetIndexOfChildMemberWithName(
void *type, const char *name, bool omit_empty_base_classes,
std::vector<uint32_t> &child_indexes) {
VALID_OR_RETURN(0);
if (type && name && name[0]) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(swift_can_type->getReferenceStorageReferent())
.GetIndexOfChildMemberWithName(name, omit_empty_base_classes,
child_indexes);
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info) {
ConstString const_name(name);
const size_t num_sized_elements =
cached_enum_info->GetNumElementsWithPayload();
for (size_t i = 0; i < num_sized_elements; ++i) {
if (cached_enum_info->GetElementWithPayloadAtIndex(i)->name ==
const_name) {
child_indexes.push_back(i);
return child_indexes.size();
}
}
}
} break;
case swift::TypeKind::Tuple: {
      // For tuples, always look up the member by number first, since a
      // tuple element can be named and yet still be accessed by number.
swift::TupleType *tuple_type = swift_can_type->castTo<swift::TupleType>();
uint32_t tuple_idx = StringConvert::ToUInt32(name, UINT32_MAX);
if (tuple_idx != UINT32_MAX) {
if (tuple_idx < tuple_type->getNumElements()) {
child_indexes.push_back(tuple_idx);
return child_indexes.size();
} else
return 0;
}
// Otherwise, perform lookup by name.
for (uint32_t tuple_idx : swift::range(tuple_type->getNumElements())) {
if (tuple_type->getElement(tuple_idx).getName().str() == name) {
child_indexes.push_back(tuple_idx);
return child_indexes.size();
}
}
return 0;
}
case swift::TypeKind::Struct:
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass:
case swift::TypeKind::BoundGenericStruct: {
auto nominal = swift_can_type->getAnyNominal();
auto stored_properties = GetStoredProperties(nominal);
auto class_decl = llvm::dyn_cast<swift::ClassDecl>(nominal);
// Search the stored properties.
for (unsigned idx : indices(stored_properties)) {
auto property = stored_properties[idx];
if (property->getBaseName().userFacingName() == name) {
// We found it!
// If we have a superclass, adjust the index accordingly.
if (class_decl && class_decl->hasSuperclass())
++idx;
child_indexes.push_back(idx);
return child_indexes.size();
}
}
// Search the superclass, if there is one.
if (class_decl && class_decl->hasSuperclass()) {
// Push index zero for the base class
child_indexes.push_back(0);
// Look in the superclass.
swift::Type superclass_swift_type = swift_can_type->getSuperclass();
CompilerType superclass_type(superclass_swift_type.getPointer());
if (superclass_type.GetIndexOfChildMemberWithName(
name, omit_empty_base_classes, child_indexes))
return child_indexes.size();
// We didn't find a stored property matching "name" in our
// superclass, pop the superclass zero index that we pushed on
// above.
child_indexes.pop_back();
}
} break;
case swift::TypeKind::Protocol:
case swift::TypeKind::ProtocolComposition: {
ProtocolInfo protocol_info;
if (!GetProtocolTypeInfo(CompilerType(GetSwiftType(type)), protocol_info))
break;
CompilerType compiler_type(GetSwiftType(type));
for (unsigned idx : swift::range(protocol_info.m_num_storage_words)) {
CompilerType child_type;
std::string child_name;
std::tie(child_type, child_name) = GetExistentialTypeChild(
GetASTContext(), compiler_type, protocol_info, idx);
if (name == child_name) {
child_indexes.push_back(idx);
return child_indexes.size();
}
}
} break;
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype:
break;
case swift::TypeKind::Module:
case swift::TypeKind::PrimaryArchetype:
case swift::TypeKind::OpenedArchetype:
case swift::TypeKind::NestedArchetype:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
break;
case swift::TypeKind::LValue: {
CompilerType pointee_clang_type(GetNonReferenceType(type));
if (pointee_clang_type.IsAggregateType()) {
return pointee_clang_type.GetIndexOfChildMemberWithName(
name, omit_empty_base_classes, child_indexes);
}
} break;
case swift::TypeKind::UnboundGeneric:
break;
case swift::TypeKind::TypeVariable:
break;
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::TypeAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
case swift::TypeKind::SILToken:
break;
}
}
return 0;
}
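// Hedged usage sketch for GetIndexOfChildMemberWithName() (variable names are
// hypothetical): the returned index path is walked from the outermost type
// inward, one child index per nesting level.
//
//   std::vector<uint32_t> child_indexes;
//   if (swift_ast_ctx->GetIndexOfChildMemberWithName(
//           opaque_type, "m_b", /*omit_empty_base_classes=*/true,
//           child_indexes))
//     size_t num_levels = child_indexes.size();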
/// Get the index of the child of "clang_type" whose name matches. This
/// function doesn't descend into the children, but only looks one
/// level deep and name matches can include base class names.
uint32_t
SwiftASTContext::GetIndexOfChildWithName(void *type, const char *name,
bool omit_empty_base_classes) {
VALID_OR_RETURN(UINT32_MAX);
std::vector<uint32_t> child_indexes;
size_t num_child_indexes = GetIndexOfChildMemberWithName(
type, name, omit_empty_base_classes, child_indexes);
return num_child_indexes == 1 ? child_indexes.front() : UINT32_MAX;
}
size_t SwiftASTContext::GetNumTemplateArguments(void *type) {
if (!type)
return 0;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::UnboundGeneric: {
swift::UnboundGenericType *unbound_generic_type =
swift_can_type->castTo<swift::UnboundGenericType>();
auto *nominal_type_decl = unbound_generic_type->getDecl();
swift::GenericParamList *generic_param_list =
nominal_type_decl->getGenericParams();
return generic_param_list->getParams().size();
} break;
case swift::TypeKind::BoundGenericClass:
case swift::TypeKind::BoundGenericStruct:
case swift::TypeKind::BoundGenericEnum: {
swift::BoundGenericType *bound_generic_type =
swift_can_type->castTo<swift::BoundGenericType>();
return bound_generic_type->getGenericArgs().size();
}
default:
break;
}
return 0;
}
bool SwiftASTContext::GetSelectedEnumCase(const CompilerType &type,
const DataExtractor &data,
ConstString *name, bool *has_payload,
CompilerType *payload,
bool *is_indirect) {
if (auto ast =
llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
default:
break;
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info =
ast->GetCachedEnumInfo(swift_can_type.getPointer());
if (cached_enum_info) {
auto enum_elem_info = cached_enum_info->GetElementFromData(data, true);
if (enum_elem_info) {
if (name)
*name = enum_elem_info->name;
if (has_payload)
*has_payload = enum_elem_info->has_payload;
if (payload)
*payload = enum_elem_info->payload_type;
if (is_indirect)
*is_indirect = enum_elem_info->is_indirect;
return true;
}
}
} break;
}
}
return false;
}
lldb::GenericKind SwiftASTContext::GetGenericArgumentKind(void *type,
size_t idx) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
if (auto *unbound_generic_type =
swift_can_type->getAs<swift::UnboundGenericType>())
return eUnboundGenericKindType;
if (auto *bound_generic_type =
swift_can_type->getAs<swift::BoundGenericType>())
if (idx < bound_generic_type->getGenericArgs().size())
return eBoundGenericKindType;
}
return eNullGenericKindType;
}
CompilerType SwiftASTContext::GetBoundGenericType(void *type, size_t idx) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
assert(&swift_can_type->getASTContext() == GetASTContext());
if (auto *bound_generic_type =
swift_can_type->getAs<swift::BoundGenericType>())
if (idx < bound_generic_type->getGenericArgs().size())
return {bound_generic_type->getGenericArgs()[idx].getPointer()};
}
return {};
}
CompilerType SwiftASTContext::GetUnboundGenericType(void *type, size_t idx) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
assert(&swift_can_type->getASTContext() == GetASTContext());
if (auto *unbound_generic_type =
swift_can_type->getAs<swift::UnboundGenericType>()) {
auto *nominal_type_decl = unbound_generic_type->getDecl();
swift::GenericSignature *generic_sig =
nominal_type_decl->getGenericSignature();
auto depTy = generic_sig->getGenericParams()[idx];
return {nominal_type_decl->mapTypeIntoContext(depTy)
->castTo<swift::ArchetypeType>()};
}
}
return {};
}
CompilerType SwiftASTContext::GetGenericArgumentType(void *type, size_t idx) {
VALID_OR_RETURN(CompilerType());
switch (GetGenericArgumentKind(type, idx)) {
case eBoundGenericKindType:
return GetBoundGenericType(type, idx);
case eUnboundGenericKindType:
return GetUnboundGenericType(type, idx);
default:
break;
}
return {};
}
CompilerType SwiftASTContext::GetTypeForFormatters(void *type) {
VALID_OR_RETURN(CompilerType());
if (type) {
swift::Type swift_type(GetSwiftType(type));
assert(&swift_type->getASTContext() == GetASTContext());
return {swift_type};
}
return {};
}
LazyBool SwiftASTContext::ShouldPrintAsOneLiner(void *type,
ValueObject *valobj) {
if (type) {
CompilerType can_compiler_type(GetCanonicalType(type));
if (IsImportedType(can_compiler_type, nullptr))
return eLazyBoolNo;
}
if (valobj) {
if (valobj->IsBaseClass())
return eLazyBoolNo;
if ((valobj->GetLanguageFlags() & LanguageFlags::eIsIndirectEnumCase) ==
LanguageFlags::eIsIndirectEnumCase)
return eLazyBoolNo;
}
return eLazyBoolCalculate;
}
bool SwiftASTContext::IsMeaninglessWithoutDynamicResolution(void *type) {
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
return swift_can_type->hasTypeParameter();
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::GenericTypeParam:
return true;
default:
return false;
}
}
return false;
}
//----------------------------------------------------------------------
// Dumping types
//----------------------------------------------------------------------
#define DEPTH_INCREMENT 2
#ifndef NDEBUG
LLVM_DUMP_METHOD void
SwiftASTContext::dump(lldb::opaque_compiler_type_t type) const {
if (!type)
return;
swift::Type swift_type = GetSwiftType(type);
swift_type.dump();
}
#endif
void SwiftASTContext::DumpValue(
void *type, ExecutionContext *exe_ctx, Stream *s, lldb::Format format,
const lldb_private::DataExtractor &data, lldb::offset_t data_byte_offset,
size_t data_byte_size, uint32_t bitfield_bit_size,
uint32_t bitfield_bit_offset, bool show_types, bool show_summary,
bool verbose, uint32_t depth) {}
bool SwiftASTContext::DumpTypeValue(
void *type, Stream *s, lldb::Format format,
const lldb_private::DataExtractor &data, lldb::offset_t byte_offset,
size_t byte_size, uint32_t bitfield_bit_size, uint32_t bitfield_bit_offset,
ExecutionContextScope *exe_scope, bool is_base_class) {
VALID_OR_RETURN(false);
if (!type)
return false;
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
const swift::TypeKind type_kind = swift_can_type->getKind();
switch (type_kind) {
case swift::TypeKind::Error:
break;
case swift::TypeKind::Class:
case swift::TypeKind::BoundGenericClass:
// If we have a class that is in a variable then it is a pointer,
// else if it is a base class, it has no value.
if (is_base_class)
break;
LLVM_FALLTHROUGH;
case swift::TypeKind::BuiltinInteger:
case swift::TypeKind::BuiltinFloat:
case swift::TypeKind::BuiltinRawPointer:
case swift::TypeKind::BuiltinNativeObject:
case swift::TypeKind::BuiltinUnsafeValueBuffer:
case swift::TypeKind::BuiltinUnknownObject:
case swift::TypeKind::BuiltinBridgeObject:
case swift::TypeKind::PrimaryArchetype:
case swift::TypeKind::OpenedArchetype:
case swift::TypeKind::NestedArchetype:
case swift::TypeKind::Function:
case swift::TypeKind::GenericFunction:
case swift::TypeKind::GenericTypeParam:
case swift::TypeKind::DependentMember:
case swift::TypeKind::LValue: {
uint32_t item_count = 1;
      // For a few formats we might need to modify our size and count
      // depending on how we are trying to display the value.
switch (format) {
default:
case eFormatBoolean:
case eFormatBinary:
case eFormatComplex:
case eFormatCString: // NULL terminated C strings
case eFormatDecimal:
case eFormatEnum:
case eFormatHex:
case eFormatHexUppercase:
case eFormatFloat:
case eFormatOctal:
case eFormatOSType:
case eFormatUnsigned:
case eFormatPointer:
case eFormatVectorOfChar:
case eFormatVectorOfSInt8:
case eFormatVectorOfUInt8:
case eFormatVectorOfSInt16:
case eFormatVectorOfUInt16:
case eFormatVectorOfSInt32:
case eFormatVectorOfUInt32:
case eFormatVectorOfSInt64:
case eFormatVectorOfUInt64:
case eFormatVectorOfFloat32:
case eFormatVectorOfFloat64:
case eFormatVectorOfUInt128:
break;
case eFormatAddressInfo:
if (byte_size == 0) {
byte_size = exe_scope->CalculateTarget()
->GetArchitecture()
.GetAddressByteSize();
item_count = 1;
}
break;
case eFormatChar:
case eFormatCharPrintable:
case eFormatCharArray:
case eFormatBytes:
case eFormatBytesWithASCII:
item_count = byte_size;
byte_size = 1;
break;
case eFormatUnicode16:
item_count = byte_size / 2;
byte_size = 2;
break;
case eFormatUnicode32:
item_count = byte_size / 4;
byte_size = 4;
break;
}
return DumpDataExtractor(data, s, byte_offset, format, byte_size,
item_count, UINT32_MAX, LLDB_INVALID_ADDRESS,
bitfield_bit_size, bitfield_bit_offset, exe_scope);
} break;
case swift::TypeKind::BuiltinVector:
break;
case swift::TypeKind::Tuple:
break;
case swift::TypeKind::UnmanagedStorage:
case swift::TypeKind::UnownedStorage:
case swift::TypeKind::WeakStorage:
return CompilerType(swift_can_type->getReferenceStorageReferent())
.DumpTypeValue(s, format, data, byte_offset, byte_size,
bitfield_bit_size, bitfield_bit_offset, exe_scope,
is_base_class);
case swift::TypeKind::Enum:
case swift::TypeKind::BoundGenericEnum: {
SwiftEnumDescriptor *cached_enum_info = GetCachedEnumInfo(type);
if (cached_enum_info) {
auto enum_elem_info = cached_enum_info->GetElementFromData(data, true);
if (enum_elem_info)
s->Printf("%s", enum_elem_info->name.GetCString());
else {
lldb::offset_t ptr = 0;
if (data.GetByteSize())
s->Printf("<invalid> (0x%" PRIx8 ")", data.GetU8(&ptr));
else
s->Printf("<empty>");
}
return true;
} else
s->Printf("<unknown type>");
} break;
case swift::TypeKind::Struct:
case swift::TypeKind::Protocol:
return false;
case swift::TypeKind::ExistentialMetatype:
case swift::TypeKind::Metatype: {
return DumpDataExtractor(data, s, byte_offset, eFormatPointer, byte_size, 1,
UINT32_MAX, LLDB_INVALID_ADDRESS,
bitfield_bit_size, bitfield_bit_offset, exe_scope);
} break;
case swift::TypeKind::Module:
case swift::TypeKind::ProtocolComposition:
case swift::TypeKind::UnboundGeneric:
case swift::TypeKind::BoundGenericStruct:
case swift::TypeKind::TypeVariable:
case swift::TypeKind::DynamicSelf:
case swift::TypeKind::SILBox:
case swift::TypeKind::SILFunction:
case swift::TypeKind::SILBlockStorage:
case swift::TypeKind::Unresolved:
break;
case swift::TypeKind::Optional:
case swift::TypeKind::TypeAlias:
case swift::TypeKind::Paren:
case swift::TypeKind::Dictionary:
case swift::TypeKind::ArraySlice:
assert(false && "Not a canonical type");
break;
case swift::TypeKind::SILToken:
break;
}
  return false;
}
bool SwiftASTContext::IsImportedType(const CompilerType &type,
CompilerType *original_type) {
bool success = false;
if (llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
do {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
swift::NominalType *nominal_type =
swift_can_type->getAs<swift::NominalType>();
if (!nominal_type)
break;
swift::NominalTypeDecl *nominal_type_decl = nominal_type->getDecl();
if (nominal_type_decl && nominal_type_decl->hasClangNode()) {
const clang::Decl *clang_decl = nominal_type_decl->getClangDecl();
if (!clang_decl)
break;
success = true;
if (!original_type)
break;
// ObjCInterfaceDecl is not a TypeDecl.
if (const clang::ObjCInterfaceDecl *objc_interface_decl =
llvm::dyn_cast<clang::ObjCInterfaceDecl>(clang_decl)) {
*original_type =
CompilerType(&objc_interface_decl->getASTContext(),
clang::QualType::getFromOpaquePtr(
objc_interface_decl->getTypeForDecl()));
} else if (const clang::TypeDecl *type_decl =
llvm::dyn_cast<clang::TypeDecl>(clang_decl)) {
*original_type = CompilerType(
&type_decl->getASTContext(),
clang::QualType::getFromOpaquePtr(type_decl->getTypeForDecl()));
} else {
// TODO: any more cases that we care about?
*original_type = CompilerType();
}
}
} while (0);
}
return success;
}
bool SwiftASTContext::IsImportedObjectiveCType(const CompilerType &type,
CompilerType *original_type) {
bool success = false;
if (llvm::dyn_cast_or_null<SwiftASTContext>(type.GetTypeSystem())) {
CompilerType local_original_type;
if (IsImportedType(type, &local_original_type)) {
if (local_original_type.IsValid()) {
ClangASTContext *clang_ast = llvm::dyn_cast_or_null<ClangASTContext>(
local_original_type.GetTypeSystem());
if (clang_ast &&
clang_ast->IsObjCObjectOrInterfaceType(local_original_type)) {
if (original_type)
*original_type = local_original_type;
success = true;
}
}
}
}
return success;
}
void SwiftASTContext::DumpSummary(void *type, ExecutionContext *exe_ctx,
Stream *s,
const lldb_private::DataExtractor &data,
lldb::offset_t data_byte_offset,
size_t data_byte_size) {}
size_t SwiftASTContext::ConvertStringToFloatValue(void *type, const char *s,
uint8_t *dst,
size_t dst_size) {
return 0;
}
void SwiftASTContext::DumpTypeDescription(void *type) {
StreamFile s(stdout, false);
DumpTypeDescription(type, &s);
}
void SwiftASTContext::DumpTypeDescription(void *type, Stream *s) {
DumpTypeDescription(type, s, false, true);
}
void SwiftASTContext::DumpTypeDescription(void *type,
bool print_help_if_available,
bool print_extensions_if_available) {
StreamFile s(stdout, false);
DumpTypeDescription(type, &s, print_help_if_available,
print_extensions_if_available);
}
static void PrintSwiftNominalType(swift::NominalTypeDecl *nominal_type_decl,
Stream *s, bool print_help_if_available,
bool print_extensions_if_available) {
if (nominal_type_decl && s) {
std::string buffer;
llvm::raw_string_ostream ostream(buffer);
const swift::PrintOptions &print_options(
SwiftASTContext::GetUserVisibleTypePrintingOptions(
print_help_if_available));
nominal_type_decl->print(ostream, print_options);
ostream.flush();
if (buffer.empty() == false)
s->Printf("%s\n", buffer.c_str());
if (print_extensions_if_available) {
for (auto ext : nominal_type_decl->getExtensions()) {
if (ext) {
buffer.clear();
llvm::raw_string_ostream ext_ostream(buffer);
ext->print(ext_ostream, print_options);
ext_ostream.flush();
if (buffer.empty() == false)
s->Printf("%s\n", buffer.c_str());
}
}
}
}
}
void SwiftASTContext::DumpTypeDescription(void *type, Stream *s,
bool print_help_if_available,
bool print_extensions_if_available) {
llvm::SmallVector<char, 1024> buf;
llvm::raw_svector_ostream llvm_ostrm(buf);
if (type) {
swift::CanType swift_can_type(GetCanonicalSwiftType(type));
switch (swift_can_type->getKind()) {
case swift::TypeKind::Module: {
swift::ModuleType *module_type =
swift_can_type->castTo<swift::ModuleType>();
swift::ModuleDecl *module = module_type->getModule();
llvm::SmallVector<swift::Decl *, 10> decls;
module->getDisplayDecls(decls);
for (swift::Decl *decl : decls) {
swift::DeclKind kind = decl->getKind();
if (kind >= swift::DeclKind::First_TypeDecl &&
kind <= swift::DeclKind::Last_TypeDecl) {
swift::TypeDecl *type_decl =
llvm::dyn_cast_or_null<swift::TypeDecl>(decl);
if (type_decl) {
CompilerType clang_type(
type_decl->getDeclaredInterfaceType().getPointer());
if (clang_type) {
Flags clang_type_flags(clang_type.GetTypeInfo());
DumpTypeDescription(clang_type.GetOpaqueQualType(), s,
print_help_if_available,
print_extensions_if_available);
}
}
} else if (kind == swift::DeclKind::Func ||
kind == swift::DeclKind::Var) {
std::string buffer;
llvm::raw_string_ostream stream(buffer);
decl->print(stream,
SwiftASTContext::GetUserVisibleTypePrintingOptions(
print_help_if_available));
stream.flush();
s->Printf("%s\n", buffer.c_str());
} else if (kind == swift::DeclKind::Import) {
swift::ImportDecl *import_decl =
llvm::dyn_cast_or_null<swift::ImportDecl>(decl);
if (import_decl) {
switch (import_decl->getImportKind()) {
case swift::ImportKind::Module: {
swift::ModuleDecl *imported_module = import_decl->getModule();
if (imported_module) {
s->Printf("import %s\n", imported_module->getName().get());
}
} break;
default: {
for (swift::Decl *imported_decl : import_decl->getDecls()) {
// All of the non-module things you can import should
// be a ValueDecl.
if (swift::ValueDecl *imported_value_decl =
llvm::dyn_cast_or_null<swift::ValueDecl>(
imported_decl)) {
if (swift::TypeBase *decl_type =
imported_value_decl->getInterfaceType()
.getPointer()) {
DumpTypeDescription(decl_type, s, print_help_if_available,
print_extensions_if_available);
}
}
}
} break;
}
}
}
}
break;
}
case swift::TypeKind::Metatype: {
s->PutCString("metatype ");
swift::MetatypeType *metatype_type =
swift_can_type->castTo<swift::MetatypeType>();
DumpTypeDescription(metatype_type->getInstanceType().getPointer(),
print_help_if_available,
print_extensions_if_available);
} break;
case swift::TypeKind::UnboundGeneric: {
swift::UnboundGenericType *unbound_generic_type =
swift_can_type->castTo<swift::UnboundGenericType>();
auto nominal_type_decl = llvm::dyn_cast<swift::NominalTypeDecl>(
unbound_generic_type->getDecl());
if (nominal_type_decl) {
PrintSwiftNominalType(nominal_type_decl, s, print_help_if_available,
print_extensions_if_available);
}
} break;
case swift::TypeKind::GenericFunction:
case swift::TypeKind::Function: {
swift::AnyFunctionType *any_function_type =
swift_can_type->castTo<swift::AnyFunctionType>();
std::string buffer;
llvm::raw_string_ostream ostream(buffer);
const swift::PrintOptions &print_options(
SwiftASTContext::GetUserVisibleTypePrintingOptions(
print_help_if_available));
any_function_type->print(ostream, print_options);
ostream.flush();
if (buffer.empty() == false)
s->Printf("%s\n", buffer.c_str());
} break;
case swift::TypeKind::Tuple: {
swift::TupleType *tuple_type = swift_can_type->castTo<swift::TupleType>();
std::string buffer;
llvm::raw_string_ostream ostream(buffer);
const swift::PrintOptions &print_options(
SwiftASTContext::GetUserVisibleTypePrintingOptions(
print_help_if_available));
tuple_type->print(ostream, print_options);
ostream.flush();
if (buffer.empty() == false)
s->Printf("%s\n", buffer.c_str());
} break;
case swift::TypeKind::BoundGenericClass:
case swift::TypeKind::BoundGenericEnum:
case swift::TypeKind::BoundGenericStruct: {
swift::BoundGenericType *bound_generic_type =
swift_can_type->castTo<swift::BoundGenericType>();
swift::NominalTypeDecl *nominal_type_decl = bound_generic_type->getDecl();
PrintSwiftNominalType(nominal_type_decl, s, print_help_if_available,
print_extensions_if_available);
} break;
case swift::TypeKind::BuiltinInteger: {
swift::BuiltinIntegerType *builtin_integer_type =
swift_can_type->castTo<swift::BuiltinIntegerType>();
s->Printf("builtin integer type of width %u bits\n",
builtin_integer_type->getWidth().getGreatestWidth());
break;
}
case swift::TypeKind::BuiltinFloat: {
swift::BuiltinFloatType *builtin_float_type =
swift_can_type->castTo<swift::BuiltinFloatType>();
s->Printf("builtin floating-point type of width %u bits\n",
builtin_float_type->getBitWidth());
break;
}
case swift::TypeKind::ProtocolComposition: {
swift::ProtocolCompositionType *protocol_composition_type =
swift_can_type->castTo<swift::ProtocolCompositionType>();
std::string buffer;
llvm::raw_string_ostream ostream(buffer);
const swift::PrintOptions &print_options(
SwiftASTContext::GetUserVisibleTypePrintingOptions(
print_help_if_available));
protocol_composition_type->print(ostream, print_options);
ostream.flush();
if (buffer.empty() == false)
s->Printf("%s\n", buffer.c_str());
break;
}
default: {
swift::NominalType *nominal_type =
llvm::dyn_cast_or_null<swift::NominalType>(
swift_can_type.getPointer());
if (nominal_type) {
swift::NominalTypeDecl *nominal_type_decl = nominal_type->getDecl();
PrintSwiftNominalType(nominal_type_decl, s, print_help_if_available,
print_extensions_if_available);
}
} break;
}
if (buf.size() > 0) {
s->Write(buf.data(), buf.size());
}
}
}
TypeSP SwiftASTContext::GetCachedType(ConstString mangled) {
TypeSP type_sp;
if (m_swift_type_map.Lookup(mangled.GetCString(), type_sp))
return type_sp;
else
return TypeSP();
}
void SwiftASTContext::SetCachedType(ConstString mangled,
const TypeSP &type_sp) {
m_swift_type_map.Insert(mangled.GetCString(), type_sp);
}
DWARFASTParser *SwiftASTContext::GetDWARFParser() {
if (!m_dwarf_ast_parser_ap)
m_dwarf_ast_parser_ap.reset(new DWARFASTParserSwift(*this));
return m_dwarf_ast_parser_ap.get();
}
std::vector<lldb::DataBufferSP> &
SwiftASTContext::GetASTVectorForModule(const Module *module) {
return m_ast_file_data_map[const_cast<Module *>(module)];
}
SwiftASTContextForExpressions::SwiftASTContextForExpressions(
std::string description, Target &target)
: SwiftASTContext(std::move(description),
target.GetArchitecture().GetTriple(), &target),
m_persistent_state_up(new SwiftPersistentExpressionState) {}
UserExpression *SwiftASTContextForExpressions::GetUserExpression(
llvm::StringRef expr, llvm::StringRef prefix, lldb::LanguageType language,
Expression::ResultType desired_type,
const EvaluateExpressionOptions &options) {
TargetSP target_sp = m_target_wp.lock();
if (!target_sp)
return nullptr;
return new SwiftUserExpression(*target_sp.get(), expr, prefix, language,
desired_type, options);
}
PersistentExpressionState *
SwiftASTContextForExpressions::GetPersistentExpressionState() {
return m_persistent_state_up.get();
}
static void DescribeFileUnit(Stream &s, swift::FileUnit *file_unit) {
s.PutCString("kind = ");
switch (file_unit->getKind()) {
  default: {
    s.PutCString("<unknown>");
  } break;
case swift::FileUnitKind::Source: {
s.PutCString("Source, ");
if (swift::SourceFile *source_file =
llvm::dyn_cast<swift::SourceFile>(file_unit)) {
s.Printf("filename = \"%s\", ", source_file->getFilename().str().c_str());
s.PutCString("source file kind = ");
switch (source_file->Kind) {
      case swift::SourceFileKind::Library:
        s.PutCString("Library");
        break;
      case swift::SourceFileKind::Main:
        s.PutCString("Main");
        break;
      case swift::SourceFileKind::REPL:
        s.PutCString("REPL");
        break;
      case swift::SourceFileKind::SIL:
        s.PutCString("SIL");
        break;
}
}
} break;
case swift::FileUnitKind::Builtin: {
s.PutCString("Builtin");
} break;
case swift::FileUnitKind::SerializedAST:
case swift::FileUnitKind::ClangModule: {
if (file_unit->getKind() == swift::FileUnitKind::SerializedAST)
s.PutCString("Serialized Swift AST, ");
else
s.PutCString("Clang module, ");
swift::LoadedFile *loaded_file = llvm::cast<swift::LoadedFile>(file_unit);
s.Printf("filename = \"%s\"", loaded_file->getFilename().str().c_str());
} break;
};
}
// Gets the full module name from the module passed in.
static void GetNameFromModule(swift::ModuleDecl *module, std::string &result) {
result.clear();
if (module) {
const char *name = module->getName().get();
if (!name)
return;
result.append(name);
const clang::Module *clang_module = module->findUnderlyingClangModule();
// At present, there doesn't seem to be any way to get the full module path
// from the Swift side.
if (!clang_module)
return;
for (const clang::Module *cur_module = clang_module->Parent; cur_module;
cur_module = cur_module->Parent) {
if (!cur_module->Name.empty()) {
result.insert(0, 1, '.');
result.insert(0, cur_module->Name);
}
}
}
}
static bool
LoadOneModule(const SourceModule &module, SwiftASTContext &swift_ast_context,
lldb::StackFrameWP &stack_frame_wp,
llvm::SmallVectorImpl<swift::SourceFile::ImportedModuleDesc>
&additional_imports,
Status &error) {
if (!module.path.size())
return false;
error.Clear();
ConstString toplevel = module.path.front();
llvm::SmallString<1> m_description;
LOG_PRINTF(LIBLLDB_LOG_EXPRESSIONS, "Importing module %s",
toplevel.AsCString());
swift::ModuleDecl *swift_module = nullptr;
lldb::StackFrameSP this_frame_sp(stack_frame_wp.lock());
swift::ModuleDecl *imported_header_module =
swift_ast_context.GetClangImporter()->getImportedHeaderModule();
if (toplevel.GetStringRef() == imported_header_module->getName().str())
swift_module = imported_header_module;
else if (this_frame_sp) {
lldb::ProcessSP process_sp(this_frame_sp->CalculateProcess());
if (process_sp)
swift_module =
swift_ast_context.FindAndLoadModule(module, *process_sp.get(), error);
else
swift_module = swift_ast_context.GetModule(module, error);
} else
swift_module = swift_ast_context.GetModule(module, error);
if (!swift_module || !error.Success() || swift_ast_context.HasFatalErrors()) {
LOG_PRINTF(LIBLLDB_LOG_EXPRESSIONS, "Couldn't import module %s: %s",
toplevel.AsCString(), error.AsCString());
if (!swift_module || swift_ast_context.HasFatalErrors()) {
return false;
}
}
if (lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_EXPRESSIONS)) {
LOG_PRINTF(LIBLLDB_LOG_EXPRESSIONS, "Importing %s with source files:",
module.path.front().AsCString());
for (swift::FileUnit *file_unit : swift_module->getFiles()) {
StreamString ss;
DescribeFileUnit(ss, file_unit);
LOG_PRINTF(LIBLLDB_LOG_EXPRESSIONS, " %s", ss.GetData());
}
}
additional_imports.push_back(swift::SourceFile::ImportedModuleDesc(
std::make_pair(swift::ModuleDecl::AccessPathTy(), swift_module),
swift::SourceFile::ImportOptions()));
return true;
}
bool SwiftASTContext::PerformUserImport(SwiftASTContext &swift_ast_context,
SymbolContext &sc,
ExecutionContextScope &exe_scope,
lldb::StackFrameWP &stack_frame_wp,
swift::SourceFile &source_file,
Status &error) {
llvm::SmallString<1> m_description;
llvm::SmallVector<swift::SourceFile::ImportedModuleDesc, 2>
additional_imports;
llvm::SmallVector<swift::ModuleDecl::ImportedModule, 2> parsed_imports;
swift::ModuleDecl::ImportFilter import_filter;
import_filter |= swift::ModuleDecl::ImportFilterKind::Public;
import_filter |= swift::ModuleDecl::ImportFilterKind::Private;
source_file.getImportedModules(parsed_imports, import_filter);
auto *persistent_expression_state =
sc.target_sp->GetSwiftPersistentExpressionState(exe_scope);
for (auto module_pair : parsed_imports) {
swift::ModuleDecl *module = module_pair.second;
if (module) {
std::string module_name;
GetNameFromModule(module, module_name);
if (!module_name.empty()) {
SourceModule module_info;
ConstString module_const_str(module_name);
module_info.path.push_back(module_const_str);
LOG_PRINTF(LIBLLDB_LOG_EXPRESSIONS,
"Performing auto import on found module: %s.\n",
module_name.c_str());
if (!LoadOneModule(module_info, swift_ast_context, stack_frame_wp,
additional_imports, error))
return false;
// How do we tell we are in REPL or playground mode?
persistent_expression_state->AddHandLoadedModule(module_const_str);
}
}
}
// Finally get the hand-loaded modules from the
// SwiftPersistentExpressionState and load them into this context:
for (ConstString name : persistent_expression_state->GetHandLoadedModules()) {
SourceModule module_info;
module_info.path.push_back(name);
if (!LoadOneModule(module_info, swift_ast_context, stack_frame_wp,
additional_imports, error))
return false;
}
source_file.addImports(additional_imports);
return true;
}
bool SwiftASTContext::PerformAutoImport(SwiftASTContext &swift_ast_context,
SymbolContext &sc,
lldb::StackFrameWP &stack_frame_wp,
swift::SourceFile *source_file,
Status &error) {
llvm::SmallVector<swift::SourceFile::ImportedModuleDesc, 2>
additional_imports;
  // Import the Swift standard library and its dependencies.
SourceModule swift_module;
swift_module.path.push_back(ConstString("Swift"));
if (!LoadOneModule(swift_module, swift_ast_context, stack_frame_wp,
additional_imports, error))
return false;
CompileUnit *compile_unit = sc.comp_unit;
if (compile_unit && compile_unit->GetLanguage() == lldb::eLanguageTypeSwift)
for (const SourceModule &module : compile_unit->GetImportedModules()) {
// When building the Swift stdlib with debug info these will
// show up in "Swift.o", but we already imported them and
// manually importing them will fail.
if (module.path.size() &&
llvm::StringSwitch<bool>(module.path.front().GetStringRef())
.Cases("Swift", "SwiftShims", "Builtin", true)
.Default(false))
continue;
if (!LoadOneModule(module, swift_ast_context, stack_frame_wp,
additional_imports, error))
return false;
}
// source_file might be NULL outside of the expression parser, where
// we don't need to notify the source file of additional imports.
if (source_file)
source_file->addImports(additional_imports);
return true;
}
| 1 | 19,533 | Does this not apply to C enums on Linux? | apple-swift-lldb | cpp |
@@ -511,7 +511,11 @@ function makeOperationHandler(server, connection, cmd, options, callback) {
session.serverSession.isDirty = true;
}
- if (supportsRetryableWrites(server) && !inActiveTransaction(session, cmd)) {
+ if (
+ isRetryableWriteError(err) &&
+ supportsRetryableWrites(server) &&
+ !inActiveTransaction(session, cmd)
+ ) {
err.addErrorLabel('RetryableWriteError');
}
| 1 | 'use strict';
const EventEmitter = require('events');
const Logger = require('../logger');
const ReadPreference = require('../read_preference');
const { ConnectionPool } = require('../cmap/connection_pool');
const { CMAP_EVENT_NAMES } = require('../cmap/events');
const { ServerDescription, compareTopologyVersion } = require('./server_description');
const { Monitor } = require('./monitor');
const {
relayEvents,
collationNotSupported,
debugOptions,
makeStateMachine,
maxWireVersion
} = require('../utils');
const {
ServerType,
STATE_CLOSED,
STATE_CLOSING,
STATE_CONNECTING,
STATE_CONNECTED
} = require('./common');
const {
MongoError,
MongoNetworkError,
MongoNetworkTimeoutError,
isSDAMUnrecoverableError,
isRetryableWriteError,
isNodeShuttingDownError,
isNetworkErrorBeforeHandshake
} = require('../error');
const { isTransactionCommand } = require('../transactions');
// Used for filtering out fields for logging
const DEBUG_FIELDS = [
'reconnect',
'reconnectTries',
'reconnectInterval',
'emitError',
'cursorFactory',
'host',
'port',
'size',
'keepAlive',
'keepAliveInitialDelay',
'noDelay',
'connectionTimeout',
'checkServerIdentity',
'socketTimeout',
'ssl',
'ca',
'crl',
'cert',
'key',
'rejectUnauthorized',
'promoteLongs',
'promoteValues',
'promoteBuffers',
'servername'
];
const stateTransition = makeStateMachine({
[STATE_CLOSED]: [STATE_CLOSED, STATE_CONNECTING],
[STATE_CONNECTING]: [STATE_CONNECTING, STATE_CLOSING, STATE_CONNECTED, STATE_CLOSED],
[STATE_CONNECTED]: [STATE_CONNECTED, STATE_CLOSING, STATE_CLOSED],
[STATE_CLOSING]: [STATE_CLOSING, STATE_CLOSED]
});
const kMonitor = Symbol('monitor');
/**
* @fires Server#serverHeartbeatStarted
* @fires Server#serverHeartbeatSucceeded
* @fires Server#serverHeartbeatFailed
*/
class Server extends EventEmitter {
/**
* Create a server
*
* @param {ServerDescription} description
* @param {object} options
* @param {any} topology
*/
constructor(description, options, topology) {
super();
this.s = {
// the server description
description,
// a saved copy of the incoming options
options,
// the server logger
logger: Logger('Server', options),
// the server state
state: STATE_CLOSED,
credentials: options.credentials,
topology
};
// create the connection pool
// NOTE: this used to happen in `connect`, we supported overriding pool options there
const addressParts = this.description.address.split(':');
const poolOptions = Object.assign(
{ host: addressParts[0], port: parseInt(addressParts[1], 10) },
options
);
this.s.pool = new ConnectionPool(poolOptions);
relayEvents(
this.s.pool,
this,
['commandStarted', 'commandSucceeded', 'commandFailed'].concat(CMAP_EVENT_NAMES)
);
this.s.pool.on('clusterTimeReceived', clusterTime => {
this.clusterTime = clusterTime;
});
// create the monitor
this[kMonitor] = new Monitor(this, this.s.options);
relayEvents(this[kMonitor], this, [
'serverHeartbeatStarted',
'serverHeartbeatSucceeded',
'serverHeartbeatFailed',
// legacy events
'monitoring'
]);
this[kMonitor].on('resetConnectionPool', () => {
this.s.pool.clear();
});
this[kMonitor].on('resetServer', error => markServerUnknown(this, error));
this[kMonitor].on('serverHeartbeatSucceeded', event => {
this.emit(
'descriptionReceived',
new ServerDescription(this.description.address, event.reply, {
roundTripTime: calculateRoundTripTime(this.description.roundTripTime, event.duration)
})
);
if (this.s.state === STATE_CONNECTING) {
stateTransition(this, STATE_CONNECTED);
this.emit('connect', this);
}
});
}
get description() {
return this.s.description;
}
get name() {
return this.s.description.address;
}
get autoEncrypter() {
if (this.s.options && this.s.options.autoEncrypter) {
return this.s.options.autoEncrypter;
}
return null;
}
/**
* Initiate server connect
*/
connect() {
if (this.s.state !== STATE_CLOSED) {
return;
}
stateTransition(this, STATE_CONNECTING);
this[kMonitor].connect();
}
/**
* Destroy the server connection
*
* @param {object} [options] Optional settings
* @param {boolean} [options.force=false] Force destroy the pool
* @param {any} callback
*/
destroy(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = Object.assign({}, { force: false }, options);
if (this.s.state === STATE_CLOSED) {
if (typeof callback === 'function') {
callback();
}
return;
}
stateTransition(this, STATE_CLOSING);
this[kMonitor].close();
this.s.pool.close(options, err => {
stateTransition(this, STATE_CLOSED);
this.emit('closed');
if (typeof callback === 'function') {
callback(err);
}
});
}
/**
   * Immediately schedule monitoring of this server. If there is already an attempt being made,
* this will be a no-op.
*/
requestCheck() {
this[kMonitor].requestCheck();
}
/**
* Execute a command
*
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {object} cmd The command hash
* @param {object} [options] Optional settings
* @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
* @param {boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
* @param {boolean} [options.checkKeys=false] Specify if the bson parser should validate keys.
* @param {boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
* @param {boolean} [options.fullResult=false] Return the full envelope instead of just the result document.
* @param {ClientSession} [options.session] Session to use for the operation
* @param {opResultCallback} callback A callback function
*/
command(ns, cmd, options, callback) {
if (typeof options === 'function') {
(callback = options), (options = {}), (options = options || {});
}
if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
callback(new MongoError('server is closed'));
return;
}
const error = basicReadValidations(this, options);
if (error) {
return callback(error);
}
// Clone the options
options = Object.assign({}, options, { wireProtocolCommand: false });
// Debug log
if (this.s.logger.isDebug()) {
this.s.logger.debug(
`executing command [${JSON.stringify({
ns,
cmd,
options: debugOptions(DEBUG_FIELDS, options)
})}] against ${this.name}`
);
}
// error if collation not supported
if (collationNotSupported(this, cmd)) {
callback(new MongoError(`server ${this.name} does not support collation`));
return;
}
this.s.pool.withConnection((err, conn, cb) => {
if (err) {
markServerUnknown(this, err);
return cb(err);
}
conn.command(ns, cmd, options, makeOperationHandler(this, conn, cmd, options, cb));
}, callback);
}
/**
* Execute a query against the server
*
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {object} cmd The command document for the query
* @param {any} cursorState
* @param {object} options Optional settings
* @param {Function} callback
*/
query(ns, cmd, cursorState, options, callback) {
if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
callback(new MongoError('server is closed'));
return;
}
this.s.pool.withConnection((err, conn, cb) => {
if (err) {
markServerUnknown(this, err);
return cb(err);
}
conn.query(ns, cmd, cursorState, options, makeOperationHandler(this, conn, cmd, options, cb));
}, callback);
}
/**
* Execute a `getMore` against the server
*
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {object} cursorState State data associated with the cursor calling this method
* @param {any} batchSize
* @param {object} options Optional settings
* @param {Function} callback
*/
getMore(ns, cursorState, batchSize, options, callback) {
if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
callback(new MongoError('server is closed'));
return;
}
this.s.pool.withConnection((err, conn, cb) => {
if (err) {
markServerUnknown(this, err);
return cb(err);
}
conn.getMore(
ns,
cursorState,
batchSize,
options,
makeOperationHandler(this, conn, null, options, cb)
);
}, callback);
}
/**
* Execute a `killCursors` command against the server
*
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {object} cursorState State data associated with the cursor calling this method
* @param {Function} callback
*/
killCursors(ns, cursorState, callback) {
if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
if (typeof callback === 'function') {
callback(new MongoError('server is closed'));
}
return;
}
this.s.pool.withConnection((err, conn, cb) => {
if (err) {
markServerUnknown(this, err);
return cb(err);
}
conn.killCursors(ns, cursorState, makeOperationHandler(this, conn, null, undefined, cb));
}, callback);
}
/**
* Insert one or more documents
*
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {Array} ops An array of documents to insert
* @param {object} options
* @param {boolean} [options.ordered=true] Execute in order or out of order
* @param {object} [options.writeConcern={}] Write concern for the operation
* @param {boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
* @param {boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
* @param {ClientSession} [options.session] Session to use for the operation
* @param {opResultCallback} callback A callback function
*/
insert(ns, ops, options, callback) {
executeWriteOperation({ server: this, op: 'insert', ns, ops }, options, callback);
}
/**
* Perform one or more update operations
*
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {Array} ops An array of updates
* @param {object} options
* @param {boolean} [options.ordered=true] Execute in order or out of order
* @param {object} [options.writeConcern={}] Write concern for the operation
* @param {boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
* @param {boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
* @param {ClientSession} [options.session] Session to use for the operation
* @param {opResultCallback} callback A callback function
*/
update(ns, ops, options, callback) {
executeWriteOperation({ server: this, op: 'update', ns, ops }, options, callback);
}
/**
* Perform one or more remove operations
*
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
* @param {Array} ops An array of removes
* @param {object} options options for removal
* @param {boolean} [options.ordered=true] Execute in order or out of order
* @param {object} [options.writeConcern={}] Write concern for the operation
* @param {boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
* @param {boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
* @param {ClientSession} [options.session] Session to use for the operation
* @param {opResultCallback} callback A callback function
*/
remove(ns, ops, options, callback) {
executeWriteOperation({ server: this, op: 'remove', ns, ops }, options, callback);
}
}
Object.defineProperty(Server.prototype, 'clusterTime', {
get: function() {
return this.s.topology.clusterTime;
},
set: function(clusterTime) {
this.s.topology.clusterTime = clusterTime;
}
});
function supportsRetryableWrites(server) {
return (
server.description.maxWireVersion >= 6 &&
server.description.logicalSessionTimeoutMinutes &&
server.description.type !== ServerType.Standalone
);
}
function calculateRoundTripTime(oldRtt, duration) {
const alpha = 0.2;
return alpha * duration + (1 - alpha) * oldRtt;
}
function basicReadValidations(server, options) {
if (options.readPreference && !(options.readPreference instanceof ReadPreference)) {
return new MongoError('readPreference must be an instance of ReadPreference');
}
}
function executeWriteOperation(args, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// TODO: once we drop Node 4, use destructuring either here or in arguments.
const server = args.server;
const op = args.op;
const ns = args.ns;
const ops = Array.isArray(args.ops) ? args.ops : [args.ops];
if (server.s.state === STATE_CLOSING || server.s.state === STATE_CLOSED) {
callback(new MongoError('server is closed'));
return;
}
if (collationNotSupported(server, options)) {
callback(new MongoError(`server ${server.name} does not support collation`));
return;
}
const unacknowledgedWrite = options.writeConcern && options.writeConcern.w === 0;
if (unacknowledgedWrite || maxWireVersion(server) < 5) {
if ((op === 'update' || op === 'remove') && ops.find(o => o.hint)) {
callback(new MongoError(`servers < 3.4 do not support hint on ${op}`));
return;
}
}
server.s.pool.withConnection((err, conn, cb) => {
if (err) {
markServerUnknown(server, err);
return cb(err);
}
conn[op](ns, ops, options, makeOperationHandler(server, conn, ops, options, cb));
}, callback);
}
function markServerUnknown(server, error) {
if (error instanceof MongoNetworkError && !(error instanceof MongoNetworkTimeoutError)) {
server[kMonitor].reset();
}
server.emit(
'descriptionReceived',
new ServerDescription(server.description.address, null, {
error,
topologyVersion:
error && error.topologyVersion ? error.topologyVersion : server.description.topologyVersion
})
);
}
function connectionIsStale(pool, connection) {
return connection.generation !== pool.generation;
}
function shouldHandleStateChangeError(server, err) {
const etv = err.topologyVersion;
const stv = server.description.topologyVersion;
return compareTopologyVersion(stv, etv) < 0;
}
function inActiveTransaction(session, cmd) {
return session && session.inTransaction() && !isTransactionCommand(cmd);
}
function makeOperationHandler(server, connection, cmd, options, callback) {
const session = options && options.session;
return function handleOperationResult(err, result) {
if (err && !connectionIsStale(server.s.pool, connection)) {
if (err instanceof MongoNetworkError) {
if (session && !session.hasEnded) {
session.serverSession.isDirty = true;
}
if (supportsRetryableWrites(server) && !inActiveTransaction(session, cmd)) {
err.addErrorLabel('RetryableWriteError');
}
if (!(err instanceof MongoNetworkTimeoutError) || isNetworkErrorBeforeHandshake(err)) {
markServerUnknown(server, err);
server.s.pool.clear();
}
} else {
        // if pre-4.4 server, then add error label if it's a retryable write error
if (
maxWireVersion(server) < 9 &&
isRetryableWriteError(err) &&
!inActiveTransaction(session, cmd)
) {
err.addErrorLabel('RetryableWriteError');
}
if (isSDAMUnrecoverableError(err)) {
if (shouldHandleStateChangeError(server, err)) {
if (maxWireVersion(server) <= 7 || isNodeShuttingDownError(err)) {
server.s.pool.clear();
}
markServerUnknown(server, err);
process.nextTick(() => server.requestCheck());
}
}
}
}
callback(err, result);
};
}
module.exports = {
Server
};
| 1 | 17,760 | Should this check if the error is `RetryableWriteError` before adding the label? | mongodb-node-mongodb-native | js |
@@ -1,6 +1,8 @@
const Plugin = require('../core/Plugin')
const { findDOMElement } = require('../core/Utils')
-const getFormData = require('get-form-data').default
+// Rollup uses get-form-data's ES modules build, and rollup-plugin-commonjs automatically resolves `.default`.
+// So, if we are being built using rollup, this require() won't have a `.default` property.
+const getFormData = require('get-form-data').default || require('get-form-data')
/**
  * Form
 | 1 | const Plugin = require('../core/Plugin')
const { findDOMElement } = require('../core/Utils')
const getFormData = require('get-form-data').default
/**
* Form
*/
module.exports = class Form extends Plugin {
constructor (uppy, opts) {
super(uppy, opts)
this.type = 'acquirer'
this.id = 'Form'
this.title = 'Form'
// set default options
const defaultOptions = {
target: null,
resultName: 'uppyResult',
getMetaFromForm: true,
addResultToForm: true,
submitOnSuccess: false,
triggerUploadOnSubmit: false
}
// merge default options with the ones set by user
this.opts = Object.assign({}, defaultOptions, opts)
this.handleFormSubmit = this.handleFormSubmit.bind(this)
this.handleUploadStart = this.handleUploadStart.bind(this)
this.handleSuccess = this.handleSuccess.bind(this)
this.addResultToForm = this.addResultToForm.bind(this)
this.getMetaFromForm = this.getMetaFromForm.bind(this)
}
handleUploadStart () {
if (this.opts.getMetaFromForm) {
this.getMetaFromForm()
}
}
handleSuccess (result) {
if (this.opts.addResultToForm) {
this.addResultToForm(result)
}
if (this.opts.submitOnSuccess) {
this.form.submit()
}
}
handleFormSubmit (ev) {
if (this.opts.triggerUploadOnSubmit) {
ev.preventDefault()
this.uppy.upload()
}
}
addResultToForm (result) {
this.uppy.log('[Form] Adding result to the original form:')
this.uppy.log(result)
let resultInput = this.form.querySelector(`[name="${this.opts.resultName}"]`)
if (resultInput) {
resultInput.value = JSON.stringify(result)
return
}
resultInput = document.createElement('input')
resultInput.name = this.opts.resultName
resultInput.type = 'hidden'
resultInput.value = JSON.stringify(result)
this.form.appendChild(resultInput)
}
getMetaFromForm () {
const formMeta = getFormData(this.form)
this.uppy.setMeta(formMeta)
}
install () {
this.form = findDOMElement(this.opts.target)
    if (!this.form || this.form.nodeName !== 'FORM') {
console.error('Form plugin requires a <form> target element passed in options to operate, none was found', 'error')
return
}
this.form.addEventListener('submit', this.handleFormSubmit)
this.uppy.on('upload', this.handleUploadStart)
this.uppy.on('complete', this.handleSuccess)
}
uninstall () {
this.form.removeEventListener('submit', this.handleFormSubmit)
this.uppy.off('upload', this.handleUploadStart)
this.uppy.off('complete', this.handleSuccess)
}
}
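The `require('get-form-data').default || require('get-form-data')` pattern introduced in the patch above exists because a plain CommonJS build exposes the function on `.default`, while Rollup's commonjs plugin already resolves the default export. A generic sketch of the same interop idea, factored into a helper — the helper name is invented for illustration and is not part of Uppy:

// Hypothetical interop helper: prefer the ES-module default export when the
// bundler exposes one, otherwise fall back to the CommonJS module object.
function interopDefault (mod) {
  return mod && mod.default ? mod.default : mod
}

// Equivalent to the patched require above:
const getFormData = interopDefault(require('get-form-data'))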
| 1 | 10,620 | Oh wow, that's one complicated require :) | transloadit-uppy | js |
@@ -1,6 +1,6 @@
// snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
// snippet-sourceauthor:[Doug-AWS]
-// snippet-sourcedescription:[Using context.Context with SDK requests.]
+// snippet-sourcedescription:[request_context.go shows how to usie context.Context with SDK requests.]
// snippet-keyword:[Extending the SDK]
// snippet-keyword:[Go]
 // snippet-service:[aws-go-sdk]
 | 1 | // snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
// snippet-sourceauthor:[Doug-AWS]
// snippet-sourcedescription:[Using context.Context with SDK requests.]
// snippet-keyword:[Extending the SDK]
// snippet-keyword:[Go]
// snippet-service:[aws-go-sdk]
// snippet-sourcetype:[snippet]
// snippet-sourcedate:[2019-03-14]
/*
Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
This file is licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License. A copy of
the License is located at
http://aws.amazon.com/apache2.0/
This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
*/
package main
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
"context"
"fmt"
"time"
)
func main() {
sess := session.Must(session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
}))
svc := sqs.New(sess)
// URL to our queue
qURL := "QueueURL"
// start snippet
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// SQS ReceiveMessage
params := &sqs.ReceiveMessageInput{
AttributeNames: []*string{
aws.String(sqs.MessageSystemAttributeNameSentTimestamp),
},
MessageAttributeNames: []*string{
aws.String(sqs.QueueAttributeNameAll),
},
QueueUrl: &qURL,
MaxNumberOfMessages: aws.Int64(1),
VisibilityTimeout: aws.Int64(20), // 20 seconds
WaitTimeSeconds: aws.Int64(0),
}
req, resp := svc.ReceiveMessageRequest(params)
req.HTTPRequest = req.HTTPRequest.WithContext(ctx)
err := req.Send()
// end snippet
if err != nil {
fmt.Println("Got error receiving message:")
fmt.Println(err.Error())
} else {
fmt.Println(resp)
}
}
| 1 | 15,680 | do you mean how to "use" | awsdocs-aws-doc-sdk-examples | rb |
@@ -400,8 +400,9 @@ describe('forwardRef', () => {
const Transition = ({ children }) => {
const state = useState(0);
forceTransition = state[1];
- expect(children.ref).to.not.be.undefined;
- if (state[0] === 0) expect(children.props.ref).to.be.undefined;
+ // TODO: ref exists on the backing node
+ // expect(children.ref).to.not.be.undefined;
+ // if (state[0] === 0) expect(children.props.ref).to.be.undefined;
return children;
};
| 1 | import React, {
createElement,
render,
createRef,
forwardRef,
hydrate,
memo,
useState,
useRef,
useImperativeHandle,
createPortal
} from 'preact/compat';
import { setupScratch, teardown } from '../../../test/_util/helpers';
import { setupRerender, act } from 'preact/test-utils';
import { getSymbol } from './testUtils';
/* eslint-disable react/jsx-boolean-value, react/display-name, prefer-arrow-callback */
describe('forwardRef', () => {
/** @type {HTMLDivElement} */
let scratch, rerender;
beforeEach(() => {
scratch = setupScratch();
rerender = setupRerender();
});
afterEach(() => {
teardown(scratch);
});
it('should have isReactComponent flag', () => {
let App = forwardRef((_, ref) => <div ref={ref}>foo</div>);
expect(App.prototype.isReactComponent).to.equal(true);
});
it('should have $$typeof property', () => {
let App = forwardRef((_, ref) => <div ref={ref}>foo</div>);
const expected = getSymbol('react.forward_ref', 0xf47);
expect(App.$$typeof).to.equal(expected);
});
it('should pass ref with createRef', () => {
let App = forwardRef((_, ref) => <div ref={ref}>foo</div>);
let ref = createRef();
render(<App ref={ref} />, scratch);
expect(ref.current).to.equalNode(scratch.firstChild);
});
it('should share the same ref reference', () => {
let passedRef;
let App = forwardRef((_, ref) => {
passedRef = ref;
return <div ref={ref}>foo</div>;
});
let ref = createRef();
render(<App ref={ref} />, scratch);
expect(ref).to.equal(passedRef);
});
it('should pass ref with a callback', () => {
let App = forwardRef((_, ref) => (
<div>
<span ref={ref}>foo</span>
</div>
));
let ref;
render(<App ref={x => (ref = x)} />, scratch);
expect(ref).to.equalNode(scratch.firstChild.firstChild);
});
it('should forward props', () => {
let spy = sinon.spy();
let App = forwardRef(spy);
render(<App foo="bar" />, scratch);
expect(spy).to.be.calledWithMatch({ foo: 'bar' });
});
it('should support nesting', () => {
let passedRef;
let Inner = forwardRef((_, ref) => {
passedRef = ref;
return <div ref={ref}>inner</div>;
});
let App = forwardRef((_, ref) => <Inner ref={ref} />);
let ref = createRef();
render(<App ref={ref} />, scratch);
expect(ref).to.equal(passedRef);
});
it('should forward null on unmount', () => {
let passedRef;
let App = forwardRef((_, ref) => {
passedRef = ref;
return <div ref={ref}>foo</div>;
});
let ref = createRef();
render(<App ref={ref} />, scratch);
render(null, scratch);
expect(passedRef.current).to.equal(null);
});
it('should be able to render and hydrate forwardRef components', () => {
const Foo = ({ label, forwardedRef }) => (
<div ref={forwardedRef}>{label}</div>
);
const App = forwardRef((props, ref) => (
<Foo {...props} forwardedRef={ref} />
));
const ref = createRef();
const markup = <App ref={ref} label="Hi" />;
const element = document.createElement('div');
element.innerHTML = '<div>Hi</div>';
expect(element.textContent).to.equal('Hi');
expect(ref.current == null).to.equal(true);
hydrate(markup, element);
expect(element.textContent).to.equal('Hi');
expect(ref.current.tagName).to.equal('DIV');
});
it('should update refs when switching between children', () => {
function Foo({ forwardedRef, setRefOnDiv }) {
return (
<section>
<div ref={setRefOnDiv ? forwardedRef : null}>First</div>
<span ref={setRefOnDiv ? null : forwardedRef}>Second</span>
</section>
);
}
const App = forwardRef((props, ref) => (
<Foo {...props} forwardedRef={ref} />
));
const ref = createRef();
render(<App ref={ref} setRefOnDiv={true} />, scratch);
expect(ref.current.nodeName).to.equal('DIV');
render(<App ref={ref} setRefOnDiv={false} />, scratch);
expect(ref.current.nodeName).to.equal('SPAN');
});
it('should support rendering null', () => {
const App = forwardRef(() => null);
const ref = createRef();
render(<App ref={ref} />, scratch);
expect(ref.current == null).to.equal(true);
});
it('should support rendering null for multiple children', () => {
const Foo = forwardRef(() => null);
const ref = createRef();
render(
<div>
<div />
<Foo ref={ref} />
<div />
</div>,
scratch
);
expect(ref.current == null).to.equal(true);
});
it('should support useImperativeHandle', () => {
let setValue;
const Foo = forwardRef((props, ref) => {
const result = useState('');
setValue = result[1];
useImperativeHandle(
ref,
() => ({
getValue: () => result[0]
}),
[result[0]]
);
return <input ref={ref} value={result[0]} />;
});
const ref = createRef();
render(<Foo ref={ref} />, scratch);
expect(typeof ref.current.getValue).to.equal('function');
expect(ref.current.getValue()).to.equal('');
setValue('x');
rerender();
expect(typeof ref.current.getValue).to.equal('function');
expect(ref.current.getValue()).to.equal('x');
});
it('should not bailout if forwardRef is not wrapped in memo', () => {
const Component = props => <div {...props} />;
let renderCount = 0;
const App = forwardRef((props, ref) => {
renderCount++;
return <Component {...props} forwardedRef={ref} />;
});
const ref = createRef();
render(<App ref={ref} optional="foo" />, scratch);
expect(renderCount).to.equal(1);
render(<App ref={ref} optional="foo" />, scratch);
expect(renderCount).to.equal(2);
});
it('should bailout if forwardRef is wrapped in memo', () => {
const Component = props => <div ref={props.forwardedRef} />;
let renderCount = 0;
const App = memo(
forwardRef((props, ref) => {
renderCount++;
return <Component {...props} forwardedRef={ref} />;
})
);
const ref = createRef();
render(<App ref={ref} optional="foo" />, scratch);
expect(renderCount).to.equal(1);
expect(ref.current.nodeName).to.equal('DIV');
render(<App ref={ref} optional="foo" />, scratch);
expect(renderCount).to.equal(1);
const differentRef = createRef();
render(<App ref={differentRef} optional="foo" />, scratch);
expect(renderCount).to.equal(2);
expect(ref.current == null).to.equal(true);
expect(differentRef.current.nodeName).to.equal('DIV');
render(<App ref={ref} optional="bar" />, scratch);
expect(renderCount).to.equal(3);
});
it('should bailout if forwardRef is wrapped in memo using function refs', () => {
const Component = props => <div ref={props.forwardedRef} />;
let renderCount = 0;
const App = memo(
forwardRef((props, ref) => {
renderCount++;
return <Component {...props} forwardedRef={ref} />;
})
);
const ref = sinon.spy();
render(<App ref={ref} optional="foo" />, scratch);
expect(renderCount).to.equal(1);
expect(ref).to.have.been.called;
ref.resetHistory();
render(<App ref={ref} optional="foo" />, scratch);
expect(renderCount).to.equal(1);
const differentRef = sinon.spy();
render(<App ref={differentRef} optional="foo" />, scratch);
expect(renderCount).to.equal(2);
expect(ref).to.have.been.calledWith(null);
expect(differentRef).to.have.been.called;
differentRef.resetHistory();
render(<App ref={ref} optional="bar" />, scratch);
expect(renderCount).to.equal(3);
});
it('should pass ref through memo() with custom comparer function', () => {
const Foo = props => <div ref={props.forwardedRef} />;
let renderCount = 0;
const App = memo(
forwardRef((props, ref) => {
renderCount++;
return <Foo {...props} forwardedRef={ref} />;
}),
(o, p) => o.a === p.a && o.b === p.b
);
const ref = createRef();
render(<App ref={ref} a="0" b="0" c="1" />, scratch);
expect(renderCount).to.equal(1);
expect(ref.current.nodeName).to.equal('DIV');
// Changing either a or b rerenders
render(<App ref={ref} a="0" b="1" c="1" />, scratch);
expect(renderCount).to.equal(2);
// Changing c doesn't rerender
render(<App ref={ref} a="0" b="1" c="2" />, scratch);
expect(renderCount).to.equal(2);
const App2 = memo(App, (o, p) => o.a === p.a && o.c === p.c);
render(<App2 ref={ref} a="0" b="0" c="0" />, scratch);
expect(renderCount).to.equal(3);
// Changing just b no longer updates
render(<App2 ref={ref} a="0" b="1" c="0" />, scratch);
expect(renderCount).to.equal(3);
// Changing just a and c updates
render(<App2 ref={ref} a="2" b="2" c="2" />, scratch);
expect(renderCount).to.equal(4);
// Changing just c does not update
render(<App2 ref={ref} a="2" b="2" c="3" />, scratch);
expect(renderCount).to.equal(4);
// Changing ref still rerenders
const differentRef = createRef();
render(<App2 ref={differentRef} a="2" b="2" c="3" />, scratch);
expect(renderCount).to.equal(5);
expect(ref.current == null).to.equal(true);
expect(differentRef.current.nodeName).to.equal('DIV');
});
it('calls ref when this is a function.', () => {
const spy = sinon.spy();
const Bar = forwardRef((props, ref) => {
useImperativeHandle(ref, () => ({ foo: 100 }));
return null;
});
render(<Bar ref={spy} />, scratch);
expect(spy).to.be.calledOnce;
expect(spy).to.be.calledWithExactly({ foo: 100 });
});
it('stale ref missing with passed useRef', () => {
let _ref = null;
let _set = null;
const Inner = forwardRef((props, ref) => {
const _hook = useState(null);
_ref = ref;
_set = _hook[1];
return <div ref={ref} />;
});
const Parent = () => {
const parentRef = useRef(null);
return <Inner ref={parentRef}>child</Inner>;
};
act(() => {
render(<Parent />, scratch);
});
expect(_ref.current).to.equal(scratch.firstChild);
act(() => {
_set(1);
rerender();
});
expect(_ref.current).to.equal(scratch.firstChild);
});
it('should forward at diff time instead vnode-creation.', () => {
let ref, forceTransition, forceOpen;
const Portal = ({ children, open }) =>
open ? createPortal(children, scratch) : null;
const Wrapper = forwardRef((_props, ref) => <div ref={ref}>Wrapper</div>);
const Transition = ({ children }) => {
const state = useState(0);
forceTransition = state[1];
expect(children.ref).to.not.be.undefined;
if (state[0] === 0) expect(children.props.ref).to.be.undefined;
return children;
};
const App = () => {
const openState = useState(false);
forceOpen = openState[1];
ref = useRef();
return (
<Portal open={openState[0]}>
<Transition>
<Wrapper ref={ref} />
</Transition>
</Portal>
);
};
render(<App />, scratch);
act(() => {
forceOpen(true);
});
expect(ref.current.innerHTML).to.equal('Wrapper');
act(() => {
forceTransition(1);
});
expect(ref.current.innerHTML).to.equal('Wrapper');
});
// Issue #2566
it('should pass null as ref when no ref is present', () => {
let actual;
const App = forwardRef((_, ref) => {
actual = ref;
return <div />;
});
render(<App />, scratch);
expect(actual).to.equal(null);
});
});
| 1 | 15,874 | We can't really test this anymore since ref and props.ref are at the backing node level now | preactjs-preact | js |
@@ -56,6 +56,7 @@ abstract class BaseDataReader<T> implements Closeable {
private CloseableIterator<T> currentIterator;
private T current = null;
+ private FileScanTask currentTask = null;
BaseDataReader(CombinedScanTask task, FileIO io, EncryptionManager encryptionManager) {
    this.tasks = task.files().iterator();
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.source;
import java.io.Closeable;
import java.io.IOException;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.stream.Stream;
import org.apache.avro.generic.GenericData;
import org.apache.avro.util.Utf8;
import org.apache.iceberg.CombinedScanTask;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.encryption.EncryptedFiles;
import org.apache.iceberg.encryption.EncryptedInputFile;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.CloseableIterator;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.util.ByteBuffers;
import org.apache.spark.rdd.InputFileBlockHolder;
import org.apache.spark.sql.types.Decimal;
import org.apache.spark.unsafe.types.UTF8String;
/**
* Base class of Spark readers.
*
* @param <T> is the Java class returned by this reader whose objects contain one or more rows.
*/
abstract class BaseDataReader<T> implements Closeable {
private final Iterator<FileScanTask> tasks;
private final Map<String, InputFile> inputFiles;
private CloseableIterator<T> currentIterator;
private T current = null;
BaseDataReader(CombinedScanTask task, FileIO io, EncryptionManager encryptionManager) {
this.tasks = task.files().iterator();
Map<String, ByteBuffer> keyMetadata = Maps.newHashMap();
task.files().stream()
.flatMap(fileScanTask -> Stream.concat(Stream.of(fileScanTask.file()), fileScanTask.deletes().stream()))
.forEach(file -> keyMetadata.put(file.path().toString(), file.keyMetadata()));
Stream<EncryptedInputFile> encrypted = keyMetadata.entrySet().stream()
.map(entry -> EncryptedFiles.encryptedInput(io.newInputFile(entry.getKey()), entry.getValue()));
// decrypt with the batch call to avoid multiple RPCs to a key server, if possible
Iterable<InputFile> decryptedFiles = encryptionManager.decrypt(encrypted::iterator);
Map<String, InputFile> files = Maps.newHashMapWithExpectedSize(task.files().size());
decryptedFiles.forEach(decrypted -> files.putIfAbsent(decrypted.location(), decrypted));
this.inputFiles = Collections.unmodifiableMap(files);
this.currentIterator = CloseableIterator.empty();
}
public boolean next() throws IOException {
while (true) {
if (currentIterator.hasNext()) {
this.current = currentIterator.next();
return true;
} else if (tasks.hasNext()) {
this.currentIterator.close();
this.currentIterator = open(tasks.next());
} else {
this.currentIterator.close();
return false;
}
}
}
public T get() {
return current;
}
abstract CloseableIterator<T> open(FileScanTask task);
@Override
public void close() throws IOException {
InputFileBlockHolder.unset();
// close the current iterator
this.currentIterator.close();
// exhaust the task iterator
while (tasks.hasNext()) {
tasks.next();
}
}
protected InputFile getInputFile(FileScanTask task) {
Preconditions.checkArgument(!task.isDataTask(), "Invalid task type");
return inputFiles.get(task.file().path().toString());
}
protected InputFile getInputFile(String location) {
return inputFiles.get(location);
}
protected static Object convertConstant(Type type, Object value) {
if (value == null) {
return null;
}
switch (type.typeId()) {
case DECIMAL:
return Decimal.apply((BigDecimal) value);
case STRING:
if (value instanceof Utf8) {
Utf8 utf8 = (Utf8) value;
return UTF8String.fromBytes(utf8.getBytes(), 0, utf8.getByteLength());
}
return UTF8String.fromString(value.toString());
case FIXED:
if (value instanceof byte[]) {
return value;
} else if (value instanceof GenericData.Fixed) {
return ((GenericData.Fixed) value).bytes();
}
return ByteBuffers.toByteArray((ByteBuffer) value);
case BINARY:
return ByteBuffers.toByteArray((ByteBuffer) value);
default:
}
return value;
}
}
| 1 | 30,693 | Did you intend to set this in the constructor? | apache-iceberg | java |
@@ -106,11 +106,15 @@ func (s *Service) createInstance(machine *actuators.MachineScope, bootstrapToken
Role: aws.String(machine.Role()),
})
+ var err error
// Pick image from the machine configuration, or use a default one.
if machine.MachineConfig.AMI.ID != nil {
input.ImageID = *machine.MachineConfig.AMI.ID
} else {
- input.ImageID = s.defaultAMILookup(machine.Region())
+ input.ImageID, err = s.defaultAMILookup("ubuntu", "18.04", machine.Machine.Spec.Versions.Kubelet)
+ if err != nil {
+ return nil, err
+ }
}
	// Pick subnet from the machine configuration, or default to the first private available.
| 1 |
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ec2
import (
"encoding/base64"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
"k8s.io/klog"
"sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1alpha1"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/actuators"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/converters"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/filter"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/awserrors"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/certificates"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/userdata"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/tags"
"sigs.k8s.io/cluster-api-provider-aws/pkg/record"
)
// InstanceByTags returns the existing instance or nothing if it doesn't exist.
func (s *Service) InstanceByTags(machine *actuators.MachineScope) (*v1alpha1.Instance, error) {
klog.V(2).Infof("Looking for existing instance for machine %q in cluster %q", machine.Name(), s.scope.Name())
input := &ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
filter.EC2.ClusterOwned(s.scope.Name()),
filter.EC2.Name(machine.Name()),
filter.EC2.InstanceStates(ec2.InstanceStateNamePending, ec2.InstanceStateNameRunning),
},
}
out, err := s.scope.EC2.DescribeInstances(input)
switch {
case awserrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, errors.Wrap(err, "failed to describe instances by tags")
}
// TODO: currently just returns the first matched instance, need to
// better rationalize how to find the right instance to return if multiple
// match
for _, res := range out.Reservations {
for _, inst := range res.Instances {
return converters.SDKToInstance(inst), nil
}
}
return nil, nil
}
// InstanceIfExists returns the existing instance or nothing if it doesn't exist.
func (s *Service) InstanceIfExists(id string) (*v1alpha1.Instance, error) {
klog.V(2).Infof("Looking for instance %q", id)
input := &ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String(id)},
Filters: []*ec2.Filter{filter.EC2.InstanceStates(ec2.InstanceStateNamePending, ec2.InstanceStateNameRunning)},
}
out, err := s.scope.EC2.DescribeInstances(input)
switch {
case awserrors.IsNotFound(err):
return nil, nil
case err != nil:
return nil, errors.Wrapf(err, "failed to describe instance: %q", id)
}
if len(out.Reservations) > 0 && len(out.Reservations[0].Instances) > 0 {
return converters.SDKToInstance(out.Reservations[0].Instances[0]), nil
}
return nil, nil
}
// createInstance runs an ec2 instance.
func (s *Service) createInstance(machine *actuators.MachineScope, bootstrapToken string) (*v1alpha1.Instance, error) {
klog.V(2).Infof("Creating a new instance for machine %q", machine.Name())
input := &v1alpha1.Instance{
Type: machine.MachineConfig.InstanceType,
IAMProfile: machine.MachineConfig.IAMInstanceProfile,
}
input.Tags = tags.Build(tags.BuildParams{
ClusterName: s.scope.Name(),
Lifecycle: tags.ResourceLifecycleOwned,
Name: aws.String(machine.Name()),
Role: aws.String(machine.Role()),
})
// Pick image from the machine configuration, or use a default one.
if machine.MachineConfig.AMI.ID != nil {
input.ImageID = *machine.MachineConfig.AMI.ID
} else {
input.ImageID = s.defaultAMILookup(machine.Region())
}
// Pick subnet from the machine configuration, or default to the first private available.
if machine.MachineConfig.Subnet != nil && machine.MachineConfig.Subnet.ID != nil {
input.SubnetID = *machine.MachineConfig.Subnet.ID
} else {
sns := s.scope.Subnets().FilterPrivate()
if len(sns) == 0 {
return nil, awserrors.NewFailedDependency(
errors.Errorf("failed to run machine %q, no subnets available", machine.Name()),
)
}
input.SubnetID = sns[0].ID
}
if len(s.scope.ClusterConfig.CACertificate) == 0 {
return nil, awserrors.NewFailedDependency(
errors.New("failed to run controlplane, missing CACertificate"),
)
}
if s.scope.Network().APIServerELB.DNSName == "" {
return nil, awserrors.NewFailedDependency(
errors.New("failed to run controlplane, APIServer ELB not available"),
)
}
// apply values based on the role of the machine
if machine.Role() == "controlplane" {
if s.scope.SecurityGroups()[v1alpha1.SecurityGroupControlPlane] == nil {
return nil, awserrors.NewFailedDependency(
errors.New("failed to run controlplane, security group not available"),
)
}
if len(s.scope.ClusterConfig.CAPrivateKey) == 0 {
return nil, awserrors.NewFailedDependency(
errors.New("failed to run controlplane, missing CAPrivateKey"),
)
}
userData, err := userdata.NewControlPlane(&userdata.ControlPlaneInput{
CACert: string(s.scope.ClusterConfig.CACertificate),
CAKey: string(s.scope.ClusterConfig.CAPrivateKey),
ELBAddress: s.scope.Network().APIServerELB.DNSName,
ClusterName: s.scope.Name(),
PodSubnet: s.scope.Cluster.Spec.ClusterNetwork.Pods.CIDRBlocks[0],
ServiceSubnet: s.scope.Cluster.Spec.ClusterNetwork.Services.CIDRBlocks[0],
ServiceDomain: s.scope.Cluster.Spec.ClusterNetwork.ServiceDomain,
KubernetesVersion: machine.Machine.Spec.Versions.ControlPlane,
})
if err != nil {
return input, err
}
input.UserData = aws.String(userData)
input.SecurityGroupIDs = append(input.SecurityGroupIDs, s.scope.SecurityGroups()[v1alpha1.SecurityGroupControlPlane].ID)
}
if machine.Role() == "node" {
input.SecurityGroupIDs = append(input.SecurityGroupIDs, s.scope.SecurityGroups()[v1alpha1.SecurityGroupNode].ID)
caCertHash, err := certificates.GenerateCertificateHash(s.scope.ClusterConfig.CACertificate)
if err != nil {
return input, err
}
userData, err := userdata.NewNode(&userdata.NodeInput{
CACertHash: caCertHash,
BootstrapToken: bootstrapToken,
ELBAddress: s.scope.Network().APIServerELB.DNSName,
})
if err != nil {
return input, err
}
input.UserData = aws.String(userData)
}
// Pick SSH key, if any.
if machine.MachineConfig.KeyName != "" {
input.KeyName = aws.String(machine.MachineConfig.KeyName)
} else {
input.KeyName = aws.String(defaultSSHKeyName)
}
out, err := s.runInstance(machine.Role(), input)
if err != nil {
return nil, err
}
record.Eventf(machine.Machine, "CreatedInstance", "Created new %s instance with id %q", machine.Role(), out.ID)
return out, nil
}
// TerminateInstance terminates an EC2 instance.
// Returns nil on success, error in all other cases.
func (s *Service) TerminateInstance(instanceID string) error {
klog.V(2).Infof("Attempting to terminate instance with id %q", instanceID)
input := &ec2.TerminateInstancesInput{
InstanceIds: aws.StringSlice([]string{instanceID}),
}
if _, err := s.scope.EC2.TerminateInstances(input); err != nil {
return errors.Wrapf(err, "failed to terminate instance with id %q", instanceID)
}
klog.V(2).Infof("Terminated instance with id %q", instanceID)
record.Eventf(s.scope.Cluster, "DeletedInstance", "Terminated instance %q", instanceID)
return nil
}
// TerminateInstanceAndWait terminates and waits
// for an EC2 instance to terminate.
func (s *Service) TerminateInstanceAndWait(instanceID string) error {
if err := s.TerminateInstance(instanceID); err != nil {
return err
}
klog.V(2).Infof("Waiting for EC2 instance with id %q to terminate", instanceID)
input := &ec2.DescribeInstancesInput{
InstanceIds: aws.StringSlice([]string{instanceID}),
}
if err := s.scope.EC2.WaitUntilInstanceTerminated(input); err != nil {
return errors.Wrapf(err, "failed to wait for instance %q termination", instanceID)
}
return nil
}
// CreateOrGetMachine will either return an existing instance or create and return an instance.
func (s *Service) CreateOrGetMachine(machine *actuators.MachineScope, bootstrapToken string) (*v1alpha1.Instance, error) {
klog.V(2).Infof("Attempting to create or get machine %q", machine.Name())
// instance id exists, try to get it
if machine.MachineStatus.InstanceID != nil {
klog.V(2).Infof("Looking up machine %q by id %q", machine.Name(), *machine.MachineStatus.InstanceID)
instance, err := s.InstanceIfExists(*machine.MachineStatus.InstanceID)
if err != nil && !awserrors.IsNotFound(err) {
return nil, errors.Wrapf(err, "failed to look up machine %q by id %q", machine.Name(), *machine.MachineStatus.InstanceID)
} else if err == nil && instance != nil {
return instance, nil
}
}
klog.V(2).Infof("Looking up machine %q by tags", machine.Name())
instance, err := s.InstanceByTags(machine)
if err != nil && !awserrors.IsNotFound(err) {
return nil, errors.Wrapf(err, "failed to query machine %q instance by tags", machine.Name())
} else if err == nil && instance != nil {
return instance, nil
}
return s.createInstance(machine, bootstrapToken)
}
func (s *Service) runInstance(role string, i *v1alpha1.Instance) (*v1alpha1.Instance, error) {
input := &ec2.RunInstancesInput{
InstanceType: aws.String(i.Type),
SubnetId: aws.String(i.SubnetID),
ImageId: aws.String(i.ImageID),
KeyName: i.KeyName,
EbsOptimized: i.EBSOptimized,
MaxCount: aws.Int64(1),
MinCount: aws.Int64(1),
UserData: i.UserData,
}
if i.UserData != nil {
input.UserData = aws.String(base64.StdEncoding.EncodeToString([]byte(*i.UserData)))
}
if len(i.SecurityGroupIDs) > 0 {
input.SecurityGroupIds = aws.StringSlice(i.SecurityGroupIDs)
}
if i.IAMProfile != "" {
input.IamInstanceProfile = &ec2.IamInstanceProfileSpecification{
Name: aws.String(i.IAMProfile),
}
}
if len(i.Tags) > 0 {
spec := &ec2.TagSpecification{ResourceType: aws.String(ec2.ResourceTypeInstance)}
for key, value := range i.Tags {
spec.Tags = append(spec.Tags, &ec2.Tag{
Key: aws.String(key),
Value: aws.String(value),
})
}
input.TagSpecifications = append(input.TagSpecifications, spec)
}
out, err := s.scope.EC2.RunInstances(input)
if err != nil {
return nil, errors.Wrapf(err, "failed to run instance: %v", i)
}
if len(out.Instances) == 0 {
return nil, errors.Errorf("no instance returned for reservation %v", out.GoString())
}
s.scope.EC2.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{InstanceIds: []*string{out.Instances[0].InstanceId}})
return converters.SDKToInstance(out.Instances[0]), nil
}
// UpdateInstanceSecurityGroups modifies the security groups of the given
// EC2 instance.
func (s *Service) UpdateInstanceSecurityGroups(instanceID string, ids []string) error {
klog.V(2).Infof("Attempting to update security groups on instance %q", instanceID)
input := &ec2.ModifyInstanceAttributeInput{
InstanceId: aws.String(instanceID),
Groups: aws.StringSlice(ids),
}
if _, err := s.scope.EC2.ModifyInstanceAttribute(input); err != nil {
return errors.Wrapf(err, "failed to modify instance %q security groups", instanceID)
}
return nil
}
// UpdateResourceTags updates the tags for an instance.
// This will be called if there is anything to create (update) or delete.
// We may not always have to perform each action, so we check what we're
// receiving to avoid calling AWS if we don't need to.
func (s *Service) UpdateResourceTags(resourceID *string, create map[string]string, remove map[string]string) error {
klog.V(2).Infof("Attempting to update tags on resource %q", *resourceID)
// If we have anything to create or update
if len(create) > 0 {
klog.V(2).Infof("Attempting to create tags on resource %q", *resourceID)
// Convert our create map into an array of *ec2.Tag
createTagsInput := converters.MapToTags(create)
// Create the CreateTags input.
input := &ec2.CreateTagsInput{
Resources: []*string{resourceID},
Tags: createTagsInput,
}
// Create/Update tags in AWS.
if _, err := s.scope.EC2.CreateTags(input); err != nil {
return errors.Wrapf(err, "failed to create tags for resource %q: %+v", *resourceID, create)
}
}
// If we have anything to remove
if len(remove) > 0 {
klog.V(2).Infof("Attempting to delete tags on resource %q", *resourceID)
// Convert our remove map into an array of *ec2.Tag
removeTagsInput := converters.MapToTags(remove)
// Create the DeleteTags input
input := &ec2.DeleteTagsInput{
Resources: []*string{resourceID},
Tags: removeTagsInput,
}
// Delete tags in AWS.
if _, err := s.scope.EC2.DeleteTags(input); err != nil {
return errors.Wrapf(err, "failed to delete tags for resource %q: %v", *resourceID, remove)
}
}
return nil
}
| 1 | 7,617 | This looks fine as a first approach, and can we put a TODO here about ubuntu 18.04 being hardcoded? | kubernetes-sigs-cluster-api-provider-aws | go |
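The review note above asks for a TODO marking the hardcoded "ubuntu"/"18.04" arguments. A minimal, hypothetical Go sketch of what that call site could look like (defaultAMILookup is an assumed stand-in, not the provider's real function):

// Hypothetical sketch, not the provider's actual code: naming the hardcoded
// OS/version values and carrying the TODO the reviewer asks for.
package main

import "fmt"

// defaultAMILookup is an assumed stand-in for the real lookup in the ec2 service.
func defaultAMILookup(baseOS, baseOSVersion, kubernetesVersion string) (string, error) {
	return fmt.Sprintf("ami-for-%s-%s-k8s-%s", baseOS, baseOSVersion, kubernetesVersion), nil
}

func main() {
	// TODO: "ubuntu"/"18.04" are hardcoded for now; make the base image
	// configurable once a single default is no longer sufficient.
	const (
		defaultBaseOS        = "ubuntu"
		defaultBaseOSVersion = "18.04"
	)

	imageID, err := defaultAMILookup(defaultBaseOS, defaultBaseOSVersion, "v1.13.2")
	if err != nil {
		fmt.Println("AMI lookup failed:", err)
		return
	}
	fmt.Println("resolved image:", imageID)
}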
@@ -1,5 +1,6 @@
+#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
-
+#endif // USE_OPENCV
#include <stdint.h>
#include <string>
| 1 |
#include <opencv2/core/core.hpp>
#include <stdint.h>
#include <string>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/data_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/benchmark.hpp"
#include "caffe/util/io.hpp"
namespace caffe {
template <typename Dtype>
DataLayer<Dtype>::DataLayer(const LayerParameter& param)
: BasePrefetchingDataLayer<Dtype>(param),
reader_(param) {
}
template <typename Dtype>
DataLayer<Dtype>::~DataLayer() {
this->StopInternalThread();
}
template <typename Dtype>
void DataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int batch_size = this->layer_param_.data_param().batch_size();
// Read a data point, and use it to initialize the top blob.
Datum& datum = *(reader_.full().peek());
// Use data_transformer to infer the expected blob shape from datum.
vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
this->transformed_data_.Reshape(top_shape);
// Reshape top[0] and prefetch_data according to the batch_size.
top_shape[0] = batch_size;
top[0]->Reshape(top_shape);
for (int i = 0; i < this->PREFETCH_COUNT; ++i) {
this->prefetch_[i].data_.Reshape(top_shape);
}
LOG(INFO) << "output data size: " << top[0]->num() << ","
<< top[0]->channels() << "," << top[0]->height() << ","
<< top[0]->width();
// label
if (this->output_labels_) {
vector<int> label_shape(1, batch_size);
top[1]->Reshape(label_shape);
for (int i = 0; i < this->PREFETCH_COUNT; ++i) {
this->prefetch_[i].label_.Reshape(label_shape);
}
}
}
// This function is called on prefetch thread
template<typename Dtype>
void DataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;
double trans_time = 0;
CPUTimer timer;
CHECK(batch->data_.count());
CHECK(this->transformed_data_.count());
// Reshape according to the first datum of each batch
// on single input batches allows for inputs of varying dimension.
const int batch_size = this->layer_param_.data_param().batch_size();
Datum& datum = *(reader_.full().peek());
// Use data_transformer to infer the expected blob shape from datum.
vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
this->transformed_data_.Reshape(top_shape);
// Reshape batch according to the batch_size.
top_shape[0] = batch_size;
batch->data_.Reshape(top_shape);
Dtype* top_data = batch->data_.mutable_cpu_data();
Dtype* top_label = NULL; // suppress warnings about uninitialized variables
if (this->output_labels_) {
top_label = batch->label_.mutable_cpu_data();
}
for (int item_id = 0; item_id < batch_size; ++item_id) {
timer.Start();
// get a datum
Datum& datum = *(reader_.full().pop("Waiting for data"));
read_time += timer.MicroSeconds();
timer.Start();
// Apply data transformations (mirror, scale, crop...)
int offset = batch->data_.offset(item_id);
this->transformed_data_.set_cpu_data(top_data + offset);
this->data_transformer_->Transform(datum, &(this->transformed_data_));
// Copy label.
if (this->output_labels_) {
top_label[item_id] = datum.label();
}
trans_time += timer.MicroSeconds();
reader_.free().push(const_cast<Datum*>(&datum));
}
timer.Stop();
batch_timer.Stop();
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
INSTANTIATE_CLASS(DataLayer);
REGISTER_LAYER_CLASS(Data);
} // namespace caffe
| 1 | 33,664 | This isn't strictly needed since lmdb / leveldb data that is unencoded does not require OpenCV IO. I think `DataLayer` should only require lmdb / leveldb. If one tries to use encoded data without OpenCV the `DataTransformer` guards will report the dependency failure. | BVLC-caffe | cpp |
@@ -30,7 +30,7 @@ func (r *ReconcileHiveConfig) deployHiveAPI(hLog log.FieldLogger, h *resource.He
err := util.ApplyAsset(h, "config/apiserver/hiveapi-cluster-role-binding.yaml", hLog)
if err != nil {
- return err
+ hLog.WithError(err).Error("unable to apply hiveapi cluster role binding")
}
	err = util.ApplyAsset(h, "config/apiserver/service.yaml", hLog)
| 1 |
package hive
import (
log "github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/kubernetes/scheme"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/openshift/library-go/pkg/operator/resource/resourceread"
"github.com/openshift/hive/pkg/constants"
"github.com/openshift/hive/pkg/operator/assets"
"github.com/openshift/hive/pkg/operator/util"
"github.com/openshift/hive/pkg/resource"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
)
func (r *ReconcileHiveConfig) deployHiveAPI(hLog log.FieldLogger, h *resource.Helper, hiveConfig *hivev1.HiveConfig) error {
if !hiveConfig.Spec.HiveAPIEnabled {
return r.tearDownHiveAPI(hLog)
}
err := util.ApplyAsset(h, "config/apiserver/hiveapi-cluster-role-binding.yaml", hLog)
if err != nil {
return err
}
err = util.ApplyAsset(h, "config/apiserver/service.yaml", hLog)
if err != nil {
return err
}
err = util.ApplyAsset(h, "config/apiserver/service-account.yaml", hLog)
if err != nil {
return err
}
if err := r.createHiveAPIDeployment(hLog, h); err != nil {
return err
}
if err := r.createAPIServerAPIService(hLog, h); err != nil {
return err
}
return nil
}
func (r *ReconcileHiveConfig) createAPIServerAPIService(hLog log.FieldLogger, h *resource.Helper) error {
hLog.Debug("reading apiservice")
asset := assets.MustAsset("config/apiserver/apiservice.yaml")
apiService := util.ReadAPIServiceV1Beta1OrDie(asset, scheme.Scheme)
// If on 3.11 we need to set the service CA on the apiservice
is311, err := r.is311(hLog)
if err != nil {
hLog.Error("error detecting 3.11 cluster")
return err
}
// If we're running on vanilla Kube (mostly devs using kind), or OpenShift 3.x, we
// will not have access to the service cert injection we normally use. Lookup
// the cluster CA and inject into the APIServer.
// NOTE: If this is vanilla kube, you will also need to manually create a certificate
// secret, see hack/hiveapi-dev-cert.sh.
if !r.runningOnOpenShift(hLog) || is311 {
hLog.Debug("non-OpenShift 4.x cluster detected, modifying apiservice")
serviceCA, _, err := r.getCACerts(hLog)
if err != nil {
return err
}
apiService.Spec.CABundle = serviceCA
}
result, err := h.ApplyRuntimeObject(apiService, scheme.Scheme)
if err != nil {
hLog.WithError(err).Error("error applying apiservice")
return err
}
hLog.Infof("apiservice applied (%s)", result)
return nil
}
func (r *ReconcileHiveConfig) createHiveAPIDeployment(hLog log.FieldLogger, h *resource.Helper) error {
asset := assets.MustAsset("config/apiserver/deployment.yaml")
hLog.Debug("reading deployment")
hiveAPIDeployment := resourceread.ReadDeploymentV1OrDie(asset)
if r.hiveImage != "" {
hiveAPIDeployment.Spec.Template.Spec.Containers[0].Image = r.hiveImage
}
if r.hiveImagePullPolicy != "" {
hiveAPIDeployment.Spec.Template.Spec.Containers[0].ImagePullPolicy = r.hiveImagePullPolicy
}
result, err := h.ApplyRuntimeObject(hiveAPIDeployment, scheme.Scheme)
if err != nil {
hLog.WithError(err).Error("error applying deployment")
return err
}
hLog.Infof("hiveapi deployment applied (%s)", result)
return nil
}
func (r *ReconcileHiveConfig) tearDownHiveAPI(hLog log.FieldLogger) error {
objects := []struct {
key client.ObjectKey
object runtime.Object
}{
{
key: client.ObjectKey{Namespace: constants.HiveNamespace, Name: "hiveapi"},
object: &appsv1.Deployment{},
},
{
key: client.ObjectKey{Name: "v1alpha1.hive.openshift.io"},
object: &apiregistrationv1.APIService{},
},
{
key: client.ObjectKey{Namespace: constants.HiveNamespace, Name: "hiveapi"},
object: &corev1.Service{},
},
{
key: client.ObjectKey{Namespace: constants.HiveNamespace, Name: "hiveapi-sa"},
object: &corev1.ServiceAccount{},
},
}
errorList := []error{}
for _, obj := range objects {
if err := resource.DeleteAnyExistingObject(r, obj.key, obj.object, hLog); err != nil {
errorList = append(errorList, err)
hLog.WithError(err).Warn("failed to clean up old aggregated API server object")
}
}
return errors.NewAggregate(errorList)
}
| 1 | 10,584 | Should we take out this change? This was only in there to try to brute force the start of the apiserver yesterday. | openshift-hive | go |
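The review note above questions replacing the early return of the apply error with log-and-continue. A minimal, hypothetical Go sketch contrasting the two behaviors (applyAsset is an assumed stand-in for util.ApplyAsset):

// Hypothetical sketch, not the operator's actual code: returning the apply
// error immediately versus only logging it and continuing.
package main

import (
	"errors"
	"fmt"
	"log"
)

// applyAsset is an assumed stand-in for util.ApplyAsset.
func applyAsset(name string) error {
	if name == "config/apiserver/hiveapi-cluster-role-binding.yaml" {
		return errors.New("forbidden: cannot create ClusterRoleBinding")
	}
	return nil
}

// deployFailFast stops at the first asset that fails to apply (the original behavior).
func deployFailFast(assets []string) error {
	for _, a := range assets {
		if err := applyAsset(a); err != nil {
			return fmt.Errorf("apply %s: %w", a, err)
		}
	}
	return nil
}

// deployBestEffort logs failures and keeps applying the remaining assets
// (the behavior introduced by the change under review).
func deployBestEffort(assets []string) {
	for _, a := range assets {
		if err := applyAsset(a); err != nil {
			log.Printf("unable to apply %s: %v", a, err)
		}
	}
}

func main() {
	assets := []string{
		"config/apiserver/hiveapi-cluster-role-binding.yaml",
		"config/apiserver/service.yaml",
	}
	fmt.Println("fail fast:", deployFailFast(assets))
	deployBestEffort(assets)
}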
@@ -33,6 +33,7 @@ class Proposal < ActiveRecord::Base
has_many :steps
has_many :individual_steps, ->{ individual }, class_name: 'Steps::Individual'
has_many :approvers, through: :individual_steps, source: :user
+ has_many :completers, through: :individual_steps, source: :completer
has_many :api_tokens, through: :individual_steps
has_many :attachments, dependent: :destroy
  has_many :approval_delegates, through: :approvers, source: :outgoing_delegations
| 1 |
class Proposal < ActiveRecord::Base
include WorkflowModel
include ValueHelper
include StepManager
has_paper_trail class_name: 'C2Version'
CLIENT_MODELS = [] # this gets populated later
FLOWS = %w(parallel linear).freeze
workflow do
state :pending do
event :approve, transitions_to: :approved
event :restart, transitions_to: :pending
event :cancel, transitions_to: :cancelled
end
state :approved do
event :restart, transitions_to: :pending
event :cancel, transitions_to: :cancelled
event :approve, transitions_to: :approved do
halt # no need to trigger a state transition
end
end
state :cancelled do
event :approve, transitions_to: :cancelled do
halt # can't escape
end
end
end
acts_as_taggable
has_many :steps
has_many :individual_steps, ->{ individual }, class_name: 'Steps::Individual'
has_many :approvers, through: :individual_steps, source: :user
has_many :api_tokens, through: :individual_steps
has_many :attachments, dependent: :destroy
has_many :approval_delegates, through: :approvers, source: :outgoing_delegations
has_many :comments, dependent: :destroy
has_many :delegates, through: :approval_delegates, source: :assignee
has_many :observations, -> { where("proposal_roles.role_id in (select roles.id from roles where roles.name='observer')") }
has_many :observers, through: :observations, source: :user
belongs_to :client_data, polymorphic: true, dependent: :destroy
belongs_to :requester, class_name: 'User'
delegate :client_slug, to: :client_data, allow_nil: true
validates :client_data_type, inclusion: {
in: ->(_) { self.client_model_names },
message: "%{value} is not a valid client model type. Valid client model types are: #{CLIENT_MODELS.inspect}",
allow_blank: true
}
validates :flow, presence: true, inclusion: {in: FLOWS}
validates :requester_id, presence: true
validates :public_id, uniqueness: true, allow_nil: true
self.statuses.each do |status|
scope status, -> { where(status: status) }
end
scope :closed, -> { where(status: ['approved', 'cancelled']) } #TODO: Backfill to change approvals in 'reject' status to 'cancelled' status
scope :cancelled, -> { where(status: 'cancelled') }
# @todo - this should probably be the only entry into the approval system
def root_step
steps.where(parent: nil).first
end
def parallel?
flow == "parallel"
end
def linear?
flow == "linear"
end
def delegate?(user)
approval_delegates.exists?(assignee_id: user.id)
end
def existing_approval_for(user)
where_clause = <<-SQL
user_id = :user_id
OR user_id IN (SELECT assigner_id FROM approval_delegates WHERE assignee_id = :user_id)
OR user_id IN (SELECT assignee_id FROM approval_delegates WHERE assigner_id = :user_id)
SQL
steps.where(where_clause, user_id: user.id).first
end
def subscribers
results = approvers + observers + delegates + [requester]
results.compact.uniq
end
def subscribers_except_delegates
subscribers - delegates
end
def reset_status
unless cancelled?
if root_step.nil? || root_step.approved?
update(status: "approved")
else
update(status: "pending")
end
end
end
def has_subscriber?(user)
subscribers.include?(user)
end
def existing_observation_for(user)
observations.find_by(user: user)
end
def eligible_observers
if observations.count > 0
User.where(client_slug: client_slug).where('id not in (?)', observations.pluck('user_id'))
else
User.where(client_slug: client_slug)
end
end
def add_observer(email_address, adder=nil, reason=nil)
user = User.for_email_with_slug(email_address, client_slug)
# this authz check is here instead of in a Policy because the Policy classes
# are applied to the current_user, not (as in this case) the user being acted upon.
if client_data && !client_data.slug_matches?(user) && !user.admin?
fail Pundit::NotAuthorizedError.new("May not add observer belonging to a different organization.")
end
unless existing_observation_for(user)
create_new_observation(user, adder, reason)
end
end
def add_requester(email)
user = User.for_email(email)
if awaiting_approver?(user)
fail "#{email} is an approver on this Proposal -- cannot also be Requester"
end
set_requester(user)
end
def set_requester(user)
update(requester: user)
end
def name
if client_data
client_data.public_send(:name)
end
end
def fields_for_display
if client_data
client_data.public_send(:fields_for_display)
else
[]
end
end
# Be careful if altering the identifier. You run the risk of "expiring" all
# pending approval emails
def version
[
updated_at.to_i,
client_data.try(:version)
].compact.max
end
def restart
individual_steps.each(&:restart!)
if root_step
root_step.initialize!
end
Dispatcher.deliver_new_proposal_emails(self)
end
# Returns True if the user is an "active" approver and has acted on the proposal
def is_active_approver?(user)
individual_steps.non_pending.exists?(user: user)
end
def self.client_model_names
CLIENT_MODELS.map(&:to_s)
end
def self.client_slugs
CLIENT_MODELS.map(&:client_slug)
end
private
def create_new_observation(user, adder, reason)
ObservationCreator.new(
observer: user,
proposal_id: id,
reason: reason,
observer_adder: adder
).run
end
end
| 1 | 15,843 | I worry that this association name is a bit confusing. Yoz was asking me about the concept of completion yesterday, which is why I think of it. It wasn't clear to him exactly what it was. what about calling this a `step_completer` ? we know that a proposal has many steps, so I think that might be clearer. in the future, we also might want the rename the `approver` relation because a step is not always an approval these days. | 18F-C2 | rb |
@@ -11,7 +11,6 @@ namespace AutoRest.Swagger.Validation
public class OperationParametersValidation : TypedRule<SwaggerParameter>
{
private const string SubscriptionId = "subscriptionid";
- private const string ApiVersion = "api-version";
/// <summary>
        /// Id of the Rule.
| 1 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using AutoRest.Core.Logging;
using AutoRest.Core.Properties;
using AutoRest.Swagger.Validation.Core;
using AutoRest.Swagger.Model;
namespace AutoRest.Swagger.Validation
{
public class OperationParametersValidation : TypedRule<SwaggerParameter>
{
private const string SubscriptionId = "subscriptionid";
private const string ApiVersion = "api-version";
/// <summary>
/// Id of the Rule.
/// </summary>
public override string Id => "M2014";
/// <summary>
/// Violation category of the Rule.
/// </summary>
public override ValidationCategory ValidationCategory => ValidationCategory.SDKViolation;
/// <summary>
/// This rule passes if the parameters are not subscriptionId or api-version
/// </summary>
/// <param name="paths"></param>
/// <returns></returns>
public override bool IsValid(SwaggerParameter Parameter) =>
(!string.IsNullOrEmpty(Parameter.Reference) ||Parameter?.Schema != null || !(Parameter?.Name?.ToLower().Equals(SubscriptionId) == true || Parameter?.Name?.ToLower().Equals(ApiVersion) == true));
/// <summary>
/// The template message for this Rule.
/// </summary>
/// <remarks>
/// This may contain placeholders '{0}' for parameterized messages.
/// </remarks>
public override string MessageTemplate => Resources.OperationParametersNotAllowedMessage;
/// <summary>
/// The severity of this message (ie, debug/info/warning/error/fatal, etc)
/// </summary>
public override Category Severity => Category.Error;
}
}
| 1 | 24,560 | Just add a to-do saying we need to add api-version in the check some time in the future when we enable the single-swagger spec mode for validation. | Azure-autorest | java |