// Code generated by smithy-go-codegen DO NOT EDIT.
package xray
import (
"bytes"
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/service/xray/types"
smithy "github.com/aws/smithy-go"
"github.com/aws/smithy-go/encoding/httpbinding"
smithyjson "github.com/aws/smithy-go/encoding/json"
"github.com/aws/smithy-go/middleware"
smithytime "github.com/aws/smithy-go/time"
smithyhttp "github.com/aws/smithy-go/transport/http"
"math"
)
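// This file pairs each X-Ray operation with two serializer layers: the
// awsRestjson1_serializeOp<Operation> middleware builds the HTTP request (path,
// method, Content-Type, body stream), and the
// awsRestjson1_serializeOpDocument<Operation>Input helpers render the input struct
// into the REST-JSON document. The awsRestjson1_serializeOpHttpBindings<Operation>Input
// functions in this section only guard against a nil input, since these operations
// carry no HTTP label, query, or header bindings.
//
// A minimal sketch (the helper name below is hypothetical; the generated
// addOp*Middlewares functions elsewhere in this package do the real wiring) of how
// one of these serializers attaches to a smithy-go middleware stack:
//
//	func addOpBatchGetTracesSerializer(stack *middleware.Stack) error {
//		return stack.Serialize.Add(&awsRestjson1_serializeOpBatchGetTraces{}, middleware.After)
//	}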
type awsRestjson1_serializeOpBatchGetTraces struct {
}
func (*awsRestjson1_serializeOpBatchGetTraces) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpBatchGetTraces) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*BatchGetTracesInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/Traces")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentBatchGetTracesInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsBatchGetTracesInput(v *BatchGetTracesInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentBatchGetTracesInput(v *BatchGetTracesInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
if v.TraceIds != nil {
ok := object.Key("TraceIds")
if err := awsRestjson1_serializeDocumentTraceIdList(v.TraceIds, ok); err != nil {
return err
}
}
return nil
}
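// For illustration only (the values below are hypothetical), a BatchGetTracesInput
// carrying a pagination token and one trace ID serializes to a request document like:
//
//	{"NextToken":"example-token","TraceIds":["1-00000000-000000000000000000000000"]}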
type awsRestjson1_serializeOpCreateGroup struct {
}
func (*awsRestjson1_serializeOpCreateGroup) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpCreateGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*CreateGroupInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/CreateGroup")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentCreateGroupInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsCreateGroupInput(v *CreateGroupInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentCreateGroupInput(v *CreateGroupInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.FilterExpression != nil {
ok := object.Key("FilterExpression")
ok.String(*v.FilterExpression)
}
if v.GroupName != nil {
ok := object.Key("GroupName")
ok.String(*v.GroupName)
}
if v.InsightsConfiguration != nil {
ok := object.Key("InsightsConfiguration")
if err := awsRestjson1_serializeDocumentInsightsConfiguration(v.InsightsConfiguration, ok); err != nil {
return err
}
}
if v.Tags != nil {
ok := object.Key("Tags")
if err := awsRestjson1_serializeDocumentTagList(v.Tags, ok); err != nil {
return err
}
}
return nil
}
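// Nested members on CreateGroupInput (InsightsConfiguration, Tags) are handed off to
// shared awsRestjson1_serializeDocument* helpers defined elsewhere in this file, so
// each operation's document serializer only handles its own top-level members.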
type awsRestjson1_serializeOpCreateSamplingRule struct {
}
func (*awsRestjson1_serializeOpCreateSamplingRule) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpCreateSamplingRule) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*CreateSamplingRuleInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/CreateSamplingRule")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentCreateSamplingRuleInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsCreateSamplingRuleInput(v *CreateSamplingRuleInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentCreateSamplingRuleInput(v *CreateSamplingRuleInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.SamplingRule != nil {
ok := object.Key("SamplingRule")
if err := awsRestjson1_serializeDocumentSamplingRule(v.SamplingRule, ok); err != nil {
return err
}
}
if v.Tags != nil {
ok := object.Key("Tags")
if err := awsRestjson1_serializeDocumentTagList(v.Tags, ok); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpDeleteGroup struct {
}
func (*awsRestjson1_serializeOpDeleteGroup) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpDeleteGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*DeleteGroupInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/DeleteGroup")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentDeleteGroupInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsDeleteGroupInput(v *DeleteGroupInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentDeleteGroupInput(v *DeleteGroupInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.GroupARN != nil {
ok := object.Key("GroupARN")
ok.String(*v.GroupARN)
}
if v.GroupName != nil {
ok := object.Key("GroupName")
ok.String(*v.GroupName)
}
return nil
}
type awsRestjson1_serializeOpDeleteResourcePolicy struct {
}
func (*awsRestjson1_serializeOpDeleteResourcePolicy) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpDeleteResourcePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*DeleteResourcePolicyInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/DeleteResourcePolicy")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentDeleteResourcePolicyInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsDeleteResourcePolicyInput(v *DeleteResourcePolicyInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentDeleteResourcePolicyInput(v *DeleteResourcePolicyInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.PolicyName != nil {
ok := object.Key("PolicyName")
ok.String(*v.PolicyName)
}
if v.PolicyRevisionId != nil {
ok := object.Key("PolicyRevisionId")
ok.String(*v.PolicyRevisionId)
}
return nil
}
type awsRestjson1_serializeOpDeleteSamplingRule struct {
}
func (*awsRestjson1_serializeOpDeleteSamplingRule) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpDeleteSamplingRule) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*DeleteSamplingRuleInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/DeleteSamplingRule")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentDeleteSamplingRuleInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsDeleteSamplingRuleInput(v *DeleteSamplingRuleInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentDeleteSamplingRuleInput(v *DeleteSamplingRuleInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.RuleARN != nil {
ok := object.Key("RuleARN")
ok.String(*v.RuleARN)
}
if v.RuleName != nil {
ok := object.Key("RuleName")
ok.String(*v.RuleName)
}
return nil
}
type awsRestjson1_serializeOpGetEncryptionConfig struct {
}
func (*awsRestjson1_serializeOpGetEncryptionConfig) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetEncryptionConfig) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetEncryptionConfigInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/EncryptionConfig")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetEncryptionConfigInput(v *GetEncryptionConfigInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
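// GetEncryptionConfig has no modeled document members, so its serializer above only
// sets the path and method: no Content-Type header is written and no JSON body is
// streamed, unlike the other operations in this file.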
type awsRestjson1_serializeOpGetGroup struct {
}
func (*awsRestjson1_serializeOpGetGroup) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetGroupInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/GetGroup")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetGroupInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetGroupInput(v *GetGroupInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetGroupInput(v *GetGroupInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.GroupARN != nil {
ok := object.Key("GroupARN")
ok.String(*v.GroupARN)
}
if v.GroupName != nil {
ok := object.Key("GroupName")
ok.String(*v.GroupName)
}
return nil
}
type awsRestjson1_serializeOpGetGroups struct {
}
func (*awsRestjson1_serializeOpGetGroups) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetGroupsInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/Groups")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetGroupsInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetGroupsInput(v *GetGroupsInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetGroupsInput(v *GetGroupsInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
return nil
}
type awsRestjson1_serializeOpGetInsight struct {
}
func (*awsRestjson1_serializeOpGetInsight) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetInsight) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetInsightInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/Insight")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetInsightInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetInsightInput(v *GetInsightInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetInsightInput(v *GetInsightInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.InsightId != nil {
ok := object.Key("InsightId")
ok.String(*v.InsightId)
}
return nil
}
type awsRestjson1_serializeOpGetInsightEvents struct {
}
func (*awsRestjson1_serializeOpGetInsightEvents) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetInsightEvents) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetInsightEventsInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/InsightEvents")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetInsightEventsInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetInsightEventsInput(v *GetInsightEventsInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetInsightEventsInput(v *GetInsightEventsInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.InsightId != nil {
ok := object.Key("InsightId")
ok.String(*v.InsightId)
}
if v.MaxResults != nil {
ok := object.Key("MaxResults")
ok.Integer(*v.MaxResults)
}
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
return nil
}
type awsRestjson1_serializeOpGetInsightImpactGraph struct {
}
func (*awsRestjson1_serializeOpGetInsightImpactGraph) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetInsightImpactGraph) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetInsightImpactGraphInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/InsightImpactGraph")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetInsightImpactGraphInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetInsightImpactGraphInput(v *GetInsightImpactGraphInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetInsightImpactGraphInput(v *GetInsightImpactGraphInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.EndTime != nil {
ok := object.Key("EndTime")
ok.Double(smithytime.FormatEpochSeconds(*v.EndTime))
}
if v.InsightId != nil {
ok := object.Key("InsightId")
ok.String(*v.InsightId)
}
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
if v.StartTime != nil {
ok := object.Key("StartTime")
ok.Double(smithytime.FormatEpochSeconds(*v.StartTime))
}
return nil
}
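// Timestamp members such as StartTime and EndTime are written as fractional epoch
// seconds via smithytime.FormatEpochSeconds. As a hypothetical example,
// time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) serializes to the JSON number
// 1672531200, with any sub-second precision kept as a decimal fraction.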
type awsRestjson1_serializeOpGetInsightSummaries struct {
}
func (*awsRestjson1_serializeOpGetInsightSummaries) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetInsightSummaries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetInsightSummariesInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/InsightSummaries")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetInsightSummariesInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetInsightSummariesInput(v *GetInsightSummariesInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetInsightSummariesInput(v *GetInsightSummariesInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.EndTime != nil {
ok := object.Key("EndTime")
ok.Double(smithytime.FormatEpochSeconds(*v.EndTime))
}
if v.GroupARN != nil {
ok := object.Key("GroupARN")
ok.String(*v.GroupARN)
}
if v.GroupName != nil {
ok := object.Key("GroupName")
ok.String(*v.GroupName)
}
if v.MaxResults != nil {
ok := object.Key("MaxResults")
ok.Integer(*v.MaxResults)
}
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
if v.StartTime != nil {
ok := object.Key("StartTime")
ok.Double(smithytime.FormatEpochSeconds(*v.StartTime))
}
if v.States != nil {
ok := object.Key("States")
if err := awsRestjson1_serializeDocumentInsightStateList(v.States, ok); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpGetSamplingRules struct {
}
func (*awsRestjson1_serializeOpGetSamplingRules) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetSamplingRules) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetSamplingRulesInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/GetSamplingRules")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetSamplingRulesInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetSamplingRulesInput(v *GetSamplingRulesInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetSamplingRulesInput(v *GetSamplingRulesInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
return nil
}
type awsRestjson1_serializeOpGetSamplingStatisticSummaries struct {
}
func (*awsRestjson1_serializeOpGetSamplingStatisticSummaries) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetSamplingStatisticSummaries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetSamplingStatisticSummariesInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/SamplingStatisticSummaries")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetSamplingStatisticSummariesInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetSamplingStatisticSummariesInput(v *GetSamplingStatisticSummariesInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetSamplingStatisticSummariesInput(v *GetSamplingStatisticSummariesInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
return nil
}
type awsRestjson1_serializeOpGetSamplingTargets struct {
}
func (*awsRestjson1_serializeOpGetSamplingTargets) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetSamplingTargets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetSamplingTargetsInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/SamplingTargets")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetSamplingTargetsInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetSamplingTargetsInput(v *GetSamplingTargetsInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetSamplingTargetsInput(v *GetSamplingTargetsInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.SamplingStatisticsDocuments != nil {
ok := object.Key("SamplingStatisticsDocuments")
if err := awsRestjson1_serializeDocumentSamplingStatisticsDocumentList(v.SamplingStatisticsDocuments, ok); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpGetServiceGraph struct {
}
func (*awsRestjson1_serializeOpGetServiceGraph) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetServiceGraph) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetServiceGraphInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/ServiceGraph")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetServiceGraphInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetServiceGraphInput(v *GetServiceGraphInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetServiceGraphInput(v *GetServiceGraphInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.EndTime != nil {
ok := object.Key("EndTime")
ok.Double(smithytime.FormatEpochSeconds(*v.EndTime))
}
if v.GroupARN != nil {
ok := object.Key("GroupARN")
ok.String(*v.GroupARN)
}
if v.GroupName != nil {
ok := object.Key("GroupName")
ok.String(*v.GroupName)
}
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
if v.StartTime != nil {
ok := object.Key("StartTime")
ok.Double(smithytime.FormatEpochSeconds(*v.StartTime))
}
return nil
}
type awsRestjson1_serializeOpGetTimeSeriesServiceStatistics struct {
}
func (*awsRestjson1_serializeOpGetTimeSeriesServiceStatistics) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetTimeSeriesServiceStatistics) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetTimeSeriesServiceStatisticsInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/TimeSeriesServiceStatistics")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetTimeSeriesServiceStatisticsInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetTimeSeriesServiceStatisticsInput(v *GetTimeSeriesServiceStatisticsInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetTimeSeriesServiceStatisticsInput(v *GetTimeSeriesServiceStatisticsInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.EndTime != nil {
ok := object.Key("EndTime")
ok.Double(smithytime.FormatEpochSeconds(*v.EndTime))
}
if v.EntitySelectorExpression != nil {
ok := object.Key("EntitySelectorExpression")
ok.String(*v.EntitySelectorExpression)
}
if v.ForecastStatistics != nil {
ok := object.Key("ForecastStatistics")
ok.Boolean(*v.ForecastStatistics)
}
if v.GroupARN != nil {
ok := object.Key("GroupARN")
ok.String(*v.GroupARN)
}
if v.GroupName != nil {
ok := object.Key("GroupName")
ok.String(*v.GroupName)
}
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
if v.Period != nil {
ok := object.Key("Period")
ok.Integer(*v.Period)
}
if v.StartTime != nil {
ok := object.Key("StartTime")
ok.Double(smithytime.FormatEpochSeconds(*v.StartTime))
}
return nil
}
type awsRestjson1_serializeOpGetTraceGraph struct {
}
func (*awsRestjson1_serializeOpGetTraceGraph) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetTraceGraph) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetTraceGraphInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/TraceGraph")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetTraceGraphInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetTraceGraphInput(v *GetTraceGraphInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetTraceGraphInput(v *GetTraceGraphInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
if v.TraceIds != nil {
ok := object.Key("TraceIds")
if err := awsRestjson1_serializeDocumentTraceIdList(v.TraceIds, ok); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpGetTraceSummaries struct {
}
func (*awsRestjson1_serializeOpGetTraceSummaries) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpGetTraceSummaries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*GetTraceSummariesInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/TraceSummaries")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentGetTraceSummariesInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetTraceSummariesInput(v *GetTraceSummariesInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentGetTraceSummariesInput(v *GetTraceSummariesInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.EndTime != nil {
ok := object.Key("EndTime")
ok.Double(smithytime.FormatEpochSeconds(*v.EndTime))
}
if v.FilterExpression != nil {
ok := object.Key("FilterExpression")
ok.String(*v.FilterExpression)
}
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
if v.Sampling != nil {
ok := object.Key("Sampling")
ok.Boolean(*v.Sampling)
}
if v.SamplingStrategy != nil {
ok := object.Key("SamplingStrategy")
if err := awsRestjson1_serializeDocumentSamplingStrategy(v.SamplingStrategy, ok); err != nil {
return err
}
}
if v.StartTime != nil {
ok := object.Key("StartTime")
ok.Double(smithytime.FormatEpochSeconds(*v.StartTime))
}
if len(v.TimeRangeType) > 0 {
ok := object.Key("TimeRangeType")
ok.String(string(v.TimeRangeType))
}
return nil
}
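// Enum members such as TimeRangeType are value-typed strings rather than pointers, so
// the serializer emits them only when non-empty (len(v.TimeRangeType) > 0), writing
// the enum's underlying string value (for example, a types.TimeRangeType value such
// as "TraceId").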
type awsRestjson1_serializeOpListResourcePolicies struct {
}
func (*awsRestjson1_serializeOpListResourcePolicies) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpListResourcePolicies) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*ListResourcePoliciesInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/ListResourcePolicies")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentListResourcePoliciesInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsListResourcePoliciesInput(v *ListResourcePoliciesInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentListResourcePoliciesInput(v *ListResourcePoliciesInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
return nil
}
type awsRestjson1_serializeOpListTagsForResource struct {
}
func (*awsRestjson1_serializeOpListTagsForResource) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*ListTagsForResourceInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/ListTagsForResource")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentListTagsForResourceInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsListTagsForResourceInput(v *ListTagsForResourceInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentListTagsForResourceInput(v *ListTagsForResourceInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.NextToken != nil {
ok := object.Key("NextToken")
ok.String(*v.NextToken)
}
if v.ResourceARN != nil {
ok := object.Key("ResourceARN")
ok.String(*v.ResourceARN)
}
return nil
}
type awsRestjson1_serializeOpPutEncryptionConfig struct {
}
func (*awsRestjson1_serializeOpPutEncryptionConfig) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpPutEncryptionConfig) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*PutEncryptionConfigInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/PutEncryptionConfig")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentPutEncryptionConfigInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsPutEncryptionConfigInput(v *PutEncryptionConfigInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentPutEncryptionConfigInput(v *PutEncryptionConfigInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.KeyId != nil {
ok := object.Key("KeyId")
ok.String(*v.KeyId)
}
if len(v.Type) > 0 {
ok := object.Key("Type")
ok.String(string(v.Type))
}
return nil
}
type awsRestjson1_serializeOpPutResourcePolicy struct {
}
func (*awsRestjson1_serializeOpPutResourcePolicy) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpPutResourcePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*PutResourcePolicyInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/PutResourcePolicy")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentPutResourcePolicyInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsPutResourcePolicyInput(v *PutResourcePolicyInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentPutResourcePolicyInput(v *PutResourcePolicyInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.BypassPolicyLockoutCheck {
ok := object.Key("BypassPolicyLockoutCheck")
ok.Boolean(v.BypassPolicyLockoutCheck)
}
if v.PolicyDocument != nil {
ok := object.Key("PolicyDocument")
ok.String(*v.PolicyDocument)
}
if v.PolicyName != nil {
ok := object.Key("PolicyName")
ok.String(*v.PolicyName)
}
if v.PolicyRevisionId != nil {
ok := object.Key("PolicyRevisionId")
ok.String(*v.PolicyRevisionId)
}
return nil
}
type awsRestjson1_serializeOpPutTelemetryRecords struct {
}
func (*awsRestjson1_serializeOpPutTelemetryRecords) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpPutTelemetryRecords) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*PutTelemetryRecordsInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/TelemetryRecords")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentPutTelemetryRecordsInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsPutTelemetryRecordsInput(v *PutTelemetryRecordsInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentPutTelemetryRecordsInput(v *PutTelemetryRecordsInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.EC2InstanceId != nil {
ok := object.Key("EC2InstanceId")
ok.String(*v.EC2InstanceId)
}
if v.Hostname != nil {
ok := object.Key("Hostname")
ok.String(*v.Hostname)
}
if v.ResourceARN != nil {
ok := object.Key("ResourceARN")
ok.String(*v.ResourceARN)
}
if v.TelemetryRecords != nil {
ok := object.Key("TelemetryRecords")
if err := awsRestjson1_serializeDocumentTelemetryRecordList(v.TelemetryRecords, ok); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpPutTraceSegments struct {
}
func (*awsRestjson1_serializeOpPutTraceSegments) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpPutTraceSegments) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*PutTraceSegmentsInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/TraceSegments")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentPutTraceSegmentsInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsPutTraceSegmentsInput(v *PutTraceSegmentsInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentPutTraceSegmentsInput(v *PutTraceSegmentsInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.TraceSegmentDocuments != nil {
ok := object.Key("TraceSegmentDocuments")
if err := awsRestjson1_serializeDocumentTraceSegmentDocumentList(v.TraceSegmentDocuments, ok); err != nil {
return err
}
}
return nil
}
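// examplePutTraceSegmentsBody is an illustrative sketch, not part of the
// generated serializers: it shows the JSON document that
// awsRestjson1_serializeOpDocumentPutTraceSegmentsInput above builds. The
// embedded segment document is a hypothetical, truncated example.
func examplePutTraceSegmentsBody() ([]byte, error) {
	input := &PutTraceSegmentsInput{
		TraceSegmentDocuments: []string{
			`{"name":"example-service","id":"70de5b6f19ff9a0a","trace_id":"1-581cf771-a006649127e371903a2de979","start_time":1478293361.271,"end_time":1478293361.449}`,
		},
	}
	jsonEncoder := smithyjson.NewEncoder()
	if err := awsRestjson1_serializeOpDocumentPutTraceSegmentsInput(input, jsonEncoder.Value); err != nil {
		return nil, err
	}
	// Produces a body like {"TraceSegmentDocuments":["{\"name\":...}"]}.
	return jsonEncoder.Bytes(), nil
}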
type awsRestjson1_serializeOpTagResource struct {
}
func (*awsRestjson1_serializeOpTagResource) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*TagResourceInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/TagResource")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsTagResourceInput(v *TagResourceInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.ResourceARN != nil {
ok := object.Key("ResourceARN")
ok.String(*v.ResourceARN)
}
if v.Tags != nil {
ok := object.Key("Tags")
if err := awsRestjson1_serializeDocumentTagList(v.Tags, ok); err != nil {
return err
}
}
return nil
}
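// exampleTagResourceBody is an illustrative sketch, not part of the generated
// serializers: it shows the JSON document that
// awsRestjson1_serializeOpDocumentTagResourceInput above builds. The ARN and
// tag values are hypothetical placeholders.
func exampleTagResourceBody() ([]byte, error) {
	arn := "arn:aws:xray:us-east-1:123456789012:group/example-group/ABCDEF"
	key, value := "Stage", "Prod"
	input := &TagResourceInput{
		ResourceARN: &arn,
		Tags:        []types.Tag{{Key: &key, Value: &value}},
	}
	jsonEncoder := smithyjson.NewEncoder()
	if err := awsRestjson1_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil {
		return nil, err
	}
	// Produces a body like {"ResourceARN":"arn:...","Tags":[{"Key":"Stage","Value":"Prod"}]}.
	return jsonEncoder.Bytes(), nil
}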
type awsRestjson1_serializeOpUntagResource struct {
}
func (*awsRestjson1_serializeOpUntagResource) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*UntagResourceInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/UntagResource")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsUntagResourceInput(v *UntagResourceInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.ResourceARN != nil {
ok := object.Key("ResourceARN")
ok.String(*v.ResourceARN)
}
if v.TagKeys != nil {
ok := object.Key("TagKeys")
if err := awsRestjson1_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpUpdateGroup struct {
}
func (*awsRestjson1_serializeOpUpdateGroup) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpUpdateGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*UpdateGroupInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/UpdateGroup")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentUpdateGroupInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsUpdateGroupInput(v *UpdateGroupInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentUpdateGroupInput(v *UpdateGroupInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.FilterExpression != nil {
ok := object.Key("FilterExpression")
ok.String(*v.FilterExpression)
}
if v.GroupARN != nil {
ok := object.Key("GroupARN")
ok.String(*v.GroupARN)
}
if v.GroupName != nil {
ok := object.Key("GroupName")
ok.String(*v.GroupName)
}
if v.InsightsConfiguration != nil {
ok := object.Key("InsightsConfiguration")
if err := awsRestjson1_serializeDocumentInsightsConfiguration(v.InsightsConfiguration, ok); err != nil {
return err
}
}
return nil
}
type awsRestjson1_serializeOpUpdateSamplingRule struct {
}
func (*awsRestjson1_serializeOpUpdateSamplingRule) ID() string {
return "OperationSerializer"
}
func (m *awsRestjson1_serializeOpUpdateSamplingRule) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
}
input, ok := in.Parameters.(*UpdateSamplingRuleInput)
_ = input
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
}
opPath, opQuery := httpbinding.SplitURI("/UpdateSamplingRule")
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
restEncoder.SetHeader("Content-Type").String("application/json")
jsonEncoder := smithyjson.NewEncoder()
if err := awsRestjson1_serializeOpDocumentUpdateSamplingRuleInput(input, jsonEncoder.Value); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
if request.Request, err = restEncoder.Encode(request.Request); err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
in.Request = request
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsUpdateSamplingRuleInput(v *UpdateSamplingRuleInput, encoder *httpbinding.Encoder) error {
if v == nil {
return fmt.Errorf("unsupported serialization of nil %T", v)
}
return nil
}
func awsRestjson1_serializeOpDocumentUpdateSamplingRuleInput(v *UpdateSamplingRuleInput, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.SamplingRuleUpdate != nil {
ok := object.Key("SamplingRuleUpdate")
if err := awsRestjson1_serializeDocumentSamplingRuleUpdate(v.SamplingRuleUpdate, ok); err != nil {
return err
}
}
return nil
}
func awsRestjson1_serializeDocumentAttributeMap(v map[string]string, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
for key := range v {
om := object.Key(key)
om.String(v[key])
}
return nil
}
func awsRestjson1_serializeDocumentBackendConnectionErrors(v *types.BackendConnectionErrors, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.ConnectionRefusedCount != nil {
ok := object.Key("ConnectionRefusedCount")
ok.Integer(*v.ConnectionRefusedCount)
}
if v.HTTPCode4XXCount != nil {
ok := object.Key("HTTPCode4XXCount")
ok.Integer(*v.HTTPCode4XXCount)
}
if v.HTTPCode5XXCount != nil {
ok := object.Key("HTTPCode5XXCount")
ok.Integer(*v.HTTPCode5XXCount)
}
if v.OtherCount != nil {
ok := object.Key("OtherCount")
ok.Integer(*v.OtherCount)
}
if v.TimeoutCount != nil {
ok := object.Key("TimeoutCount")
ok.Integer(*v.TimeoutCount)
}
if v.UnknownHostCount != nil {
ok := object.Key("UnknownHostCount")
ok.Integer(*v.UnknownHostCount)
}
return nil
}
func awsRestjson1_serializeDocumentInsightsConfiguration(v *types.InsightsConfiguration, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.InsightsEnabled != nil {
ok := object.Key("InsightsEnabled")
ok.Boolean(*v.InsightsEnabled)
}
if v.NotificationsEnabled != nil {
ok := object.Key("NotificationsEnabled")
ok.Boolean(*v.NotificationsEnabled)
}
return nil
}
func awsRestjson1_serializeDocumentInsightStateList(v []types.InsightState, value smithyjson.Value) error {
array := value.Array()
defer array.Close()
for i := range v {
av := array.Value()
av.String(string(v[i]))
}
return nil
}
func awsRestjson1_serializeDocumentSamplingRule(v *types.SamplingRule, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.Attributes != nil {
ok := object.Key("Attributes")
if err := awsRestjson1_serializeDocumentAttributeMap(v.Attributes, ok); err != nil {
return err
}
}
{
ok := object.Key("FixedRate")
switch {
case math.IsNaN(v.FixedRate):
ok.String("NaN")
case math.IsInf(v.FixedRate, 1):
ok.String("Infinity")
case math.IsInf(v.FixedRate, -1):
ok.String("-Infinity")
default:
ok.Double(v.FixedRate)
}
}
if v.Host != nil {
ok := object.Key("Host")
ok.String(*v.Host)
}
if v.HTTPMethod != nil {
ok := object.Key("HTTPMethod")
ok.String(*v.HTTPMethod)
}
{
ok := object.Key("Priority")
ok.Integer(v.Priority)
}
{
ok := object.Key("ReservoirSize")
ok.Integer(v.ReservoirSize)
}
if v.ResourceARN != nil {
ok := object.Key("ResourceARN")
ok.String(*v.ResourceARN)
}
if v.RuleARN != nil {
ok := object.Key("RuleARN")
ok.String(*v.RuleARN)
}
if v.RuleName != nil {
ok := object.Key("RuleName")
ok.String(*v.RuleName)
}
if v.ServiceName != nil {
ok := object.Key("ServiceName")
ok.String(*v.ServiceName)
}
if v.ServiceType != nil {
ok := object.Key("ServiceType")
ok.String(*v.ServiceType)
}
if v.URLPath != nil {
ok := object.Key("URLPath")
ok.String(*v.URLPath)
}
{
ok := object.Key("Version")
ok.Integer(v.Version)
}
return nil
}
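// exampleSamplingRuleDocument is an illustrative sketch, not part of the
// generated serializers: it shows how awsRestjson1_serializeDocumentSamplingRule
// above encodes a rule, including the special-case handling that turns
// non-finite FixedRate values into the strings "NaN", "Infinity", and
// "-Infinity". All field values are hypothetical.
func exampleSamplingRuleDocument() ([]byte, error) {
	name, wildcard := "example-rule", "*"
	rule := &types.SamplingRule{
		RuleName:      &name,
		ResourceARN:   &wildcard,
		Host:          &wildcard,
		HTTPMethod:    &wildcard,
		URLPath:       &wildcard,
		ServiceName:   &wildcard,
		ServiceType:   &wildcard,
		FixedRate:     0.05,
		Priority:      1,
		ReservoirSize: 1,
		Version:       1,
	}
	jsonEncoder := smithyjson.NewEncoder()
	if err := awsRestjson1_serializeDocumentSamplingRule(rule, jsonEncoder.Value); err != nil {
		return nil, err
	}
	// Produces a body like {"FixedRate":0.05,"Host":"*",...,"Version":1}.
	return jsonEncoder.Bytes(), nil
}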
func awsRestjson1_serializeDocumentSamplingRuleUpdate(v *types.SamplingRuleUpdate, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.Attributes != nil {
ok := object.Key("Attributes")
if err := awsRestjson1_serializeDocumentAttributeMap(v.Attributes, ok); err != nil {
return err
}
}
if v.FixedRate != nil {
ok := object.Key("FixedRate")
switch {
case math.IsNaN(*v.FixedRate):
ok.String("NaN")
case math.IsInf(*v.FixedRate, 1):
ok.String("Infinity")
case math.IsInf(*v.FixedRate, -1):
ok.String("-Infinity")
default:
ok.Double(*v.FixedRate)
}
}
if v.Host != nil {
ok := object.Key("Host")
ok.String(*v.Host)
}
if v.HTTPMethod != nil {
ok := object.Key("HTTPMethod")
ok.String(*v.HTTPMethod)
}
if v.Priority != nil {
ok := object.Key("Priority")
ok.Integer(*v.Priority)
}
if v.ReservoirSize != nil {
ok := object.Key("ReservoirSize")
ok.Integer(*v.ReservoirSize)
}
if v.ResourceARN != nil {
ok := object.Key("ResourceARN")
ok.String(*v.ResourceARN)
}
if v.RuleARN != nil {
ok := object.Key("RuleARN")
ok.String(*v.RuleARN)
}
if v.RuleName != nil {
ok := object.Key("RuleName")
ok.String(*v.RuleName)
}
if v.ServiceName != nil {
ok := object.Key("ServiceName")
ok.String(*v.ServiceName)
}
if v.ServiceType != nil {
ok := object.Key("ServiceType")
ok.String(*v.ServiceType)
}
if v.URLPath != nil {
ok := object.Key("URLPath")
ok.String(*v.URLPath)
}
return nil
}
func awsRestjson1_serializeDocumentSamplingStatisticsDocument(v *types.SamplingStatisticsDocument, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.BorrowCount != 0 {
ok := object.Key("BorrowCount")
ok.Integer(v.BorrowCount)
}
if v.ClientID != nil {
ok := object.Key("ClientID")
ok.String(*v.ClientID)
}
{
ok := object.Key("RequestCount")
ok.Integer(v.RequestCount)
}
if v.RuleName != nil {
ok := object.Key("RuleName")
ok.String(*v.RuleName)
}
{
ok := object.Key("SampledCount")
ok.Integer(v.SampledCount)
}
if v.Timestamp != nil {
ok := object.Key("Timestamp")
ok.Double(smithytime.FormatEpochSeconds(*v.Timestamp))
}
return nil
}
func awsRestjson1_serializeDocumentSamplingStatisticsDocumentList(v []types.SamplingStatisticsDocument, value smithyjson.Value) error {
array := value.Array()
defer array.Close()
for i := range v {
av := array.Value()
if err := awsRestjson1_serializeDocumentSamplingStatisticsDocument(&v[i], av); err != nil {
return err
}
}
return nil
}
func awsRestjson1_serializeDocumentSamplingStrategy(v *types.SamplingStrategy, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if len(v.Name) > 0 {
ok := object.Key("Name")
ok.String(string(v.Name))
}
if v.Value != nil {
ok := object.Key("Value")
switch {
case math.IsNaN(*v.Value):
ok.String("NaN")
case math.IsInf(*v.Value, 1):
ok.String("Infinity")
case math.IsInf(*v.Value, -1):
ok.String("-Infinity")
default:
ok.Double(*v.Value)
}
}
return nil
}
func awsRestjson1_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.Key != nil {
ok := object.Key("Key")
ok.String(*v.Key)
}
if v.Value != nil {
ok := object.Key("Value")
ok.String(*v.Value)
}
return nil
}
func awsRestjson1_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error {
array := value.Array()
defer array.Close()
for i := range v {
av := array.Value()
av.String(v[i])
}
return nil
}
func awsRestjson1_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error {
array := value.Array()
defer array.Close()
for i := range v {
av := array.Value()
if err := awsRestjson1_serializeDocumentTag(&v[i], av); err != nil {
return err
}
}
return nil
}
func awsRestjson1_serializeDocumentTelemetryRecord(v *types.TelemetryRecord, value smithyjson.Value) error {
object := value.Object()
defer object.Close()
if v.BackendConnectionErrors != nil {
ok := object.Key("BackendConnectionErrors")
if err := awsRestjson1_serializeDocumentBackendConnectionErrors(v.BackendConnectionErrors, ok); err != nil {
return err
}
}
if v.SegmentsReceivedCount != nil {
ok := object.Key("SegmentsReceivedCount")
ok.Integer(*v.SegmentsReceivedCount)
}
if v.SegmentsRejectedCount != nil {
ok := object.Key("SegmentsRejectedCount")
ok.Integer(*v.SegmentsRejectedCount)
}
if v.SegmentsSentCount != nil {
ok := object.Key("SegmentsSentCount")
ok.Integer(*v.SegmentsSentCount)
}
if v.SegmentsSpilloverCount != nil {
ok := object.Key("SegmentsSpilloverCount")
ok.Integer(*v.SegmentsSpilloverCount)
}
if v.Timestamp != nil {
ok := object.Key("Timestamp")
ok.Double(smithytime.FormatEpochSeconds(*v.Timestamp))
}
return nil
}
func awsRestjson1_serializeDocumentTelemetryRecordList(v []types.TelemetryRecord, value smithyjson.Value) error {
array := value.Array()
defer array.Close()
for i := range v {
av := array.Value()
if err := awsRestjson1_serializeDocumentTelemetryRecord(&v[i], av); err != nil {
return err
}
}
return nil
}
func awsRestjson1_serializeDocumentTraceIdList(v []string, value smithyjson.Value) error {
array := value.Array()
defer array.Close()
for i := range v {
av := array.Value()
av.String(v[i])
}
return nil
}
func awsRestjson1_serializeDocumentTraceSegmentDocumentList(v []string, value smithyjson.Value) error {
array := value.Array()
defer array.Close()
for i := range v {
av := array.Value()
av.String(v[i])
}
return nil
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package xray
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/service/xray/types"
smithy "github.com/aws/smithy-go"
"github.com/aws/smithy-go/middleware"
)
type validateOpBatchGetTraces struct {
}
func (*validateOpBatchGetTraces) ID() string {
return "OperationInputValidation"
}
func (m *validateOpBatchGetTraces) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*BatchGetTracesInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpBatchGetTracesInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpCreateGroup struct {
}
func (*validateOpCreateGroup) ID() string {
return "OperationInputValidation"
}
func (m *validateOpCreateGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*CreateGroupInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpCreateGroupInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpCreateSamplingRule struct {
}
func (*validateOpCreateSamplingRule) ID() string {
return "OperationInputValidation"
}
func (m *validateOpCreateSamplingRule) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*CreateSamplingRuleInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpCreateSamplingRuleInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpDeleteResourcePolicy struct {
}
func (*validateOpDeleteResourcePolicy) ID() string {
return "OperationInputValidation"
}
func (m *validateOpDeleteResourcePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*DeleteResourcePolicyInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpDeleteResourcePolicyInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpGetInsightEvents struct {
}
func (*validateOpGetInsightEvents) ID() string {
return "OperationInputValidation"
}
func (m *validateOpGetInsightEvents) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*GetInsightEventsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpGetInsightEventsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpGetInsightImpactGraph struct {
}
func (*validateOpGetInsightImpactGraph) ID() string {
return "OperationInputValidation"
}
func (m *validateOpGetInsightImpactGraph) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*GetInsightImpactGraphInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpGetInsightImpactGraphInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpGetInsight struct {
}
func (*validateOpGetInsight) ID() string {
return "OperationInputValidation"
}
func (m *validateOpGetInsight) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*GetInsightInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpGetInsightInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpGetInsightSummaries struct {
}
func (*validateOpGetInsightSummaries) ID() string {
return "OperationInputValidation"
}
func (m *validateOpGetInsightSummaries) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*GetInsightSummariesInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpGetInsightSummariesInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpGetSamplingTargets struct {
}
func (*validateOpGetSamplingTargets) ID() string {
return "OperationInputValidation"
}
func (m *validateOpGetSamplingTargets) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*GetSamplingTargetsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpGetSamplingTargetsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpGetServiceGraph struct {
}
func (*validateOpGetServiceGraph) ID() string {
return "OperationInputValidation"
}
func (m *validateOpGetServiceGraph) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*GetServiceGraphInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpGetServiceGraphInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpGetTimeSeriesServiceStatistics struct {
}
func (*validateOpGetTimeSeriesServiceStatistics) ID() string {
return "OperationInputValidation"
}
func (m *validateOpGetTimeSeriesServiceStatistics) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*GetTimeSeriesServiceStatisticsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpGetTimeSeriesServiceStatisticsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpGetTraceGraph struct {
}
func (*validateOpGetTraceGraph) ID() string {
return "OperationInputValidation"
}
func (m *validateOpGetTraceGraph) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*GetTraceGraphInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpGetTraceGraphInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpGetTraceSummaries struct {
}
func (*validateOpGetTraceSummaries) ID() string {
return "OperationInputValidation"
}
func (m *validateOpGetTraceSummaries) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*GetTraceSummariesInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpGetTraceSummariesInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpListTagsForResource struct {
}
func (*validateOpListTagsForResource) ID() string {
return "OperationInputValidation"
}
func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*ListTagsForResourceInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpListTagsForResourceInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpPutEncryptionConfig struct {
}
func (*validateOpPutEncryptionConfig) ID() string {
return "OperationInputValidation"
}
func (m *validateOpPutEncryptionConfig) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*PutEncryptionConfigInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpPutEncryptionConfigInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpPutResourcePolicy struct {
}
func (*validateOpPutResourcePolicy) ID() string {
return "OperationInputValidation"
}
func (m *validateOpPutResourcePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*PutResourcePolicyInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpPutResourcePolicyInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpPutTelemetryRecords struct {
}
func (*validateOpPutTelemetryRecords) ID() string {
return "OperationInputValidation"
}
func (m *validateOpPutTelemetryRecords) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*PutTelemetryRecordsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpPutTelemetryRecordsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpPutTraceSegments struct {
}
func (*validateOpPutTraceSegments) ID() string {
return "OperationInputValidation"
}
func (m *validateOpPutTraceSegments) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*PutTraceSegmentsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpPutTraceSegmentsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpTagResource struct {
}
func (*validateOpTagResource) ID() string {
return "OperationInputValidation"
}
func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*TagResourceInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpTagResourceInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpUntagResource struct {
}
func (*validateOpUntagResource) ID() string {
return "OperationInputValidation"
}
func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*UntagResourceInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpUntagResourceInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpUpdateSamplingRule struct {
}
func (*validateOpUpdateSamplingRule) ID() string {
return "OperationInputValidation"
}
func (m *validateOpUpdateSamplingRule) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*UpdateSamplingRuleInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpUpdateSamplingRuleInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
func addOpBatchGetTracesValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpBatchGetTraces{}, middleware.After)
}
func addOpCreateGroupValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpCreateGroup{}, middleware.After)
}
func addOpCreateSamplingRuleValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpCreateSamplingRule{}, middleware.After)
}
func addOpDeleteResourcePolicyValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpDeleteResourcePolicy{}, middleware.After)
}
func addOpGetInsightEventsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpGetInsightEvents{}, middleware.After)
}
func addOpGetInsightImpactGraphValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpGetInsightImpactGraph{}, middleware.After)
}
func addOpGetInsightValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpGetInsight{}, middleware.After)
}
func addOpGetInsightSummariesValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpGetInsightSummaries{}, middleware.After)
}
func addOpGetSamplingTargetsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpGetSamplingTargets{}, middleware.After)
}
func addOpGetServiceGraphValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpGetServiceGraph{}, middleware.After)
}
func addOpGetTimeSeriesServiceStatisticsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpGetTimeSeriesServiceStatistics{}, middleware.After)
}
func addOpGetTraceGraphValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpGetTraceGraph{}, middleware.After)
}
func addOpGetTraceSummariesValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpGetTraceSummaries{}, middleware.After)
}
func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After)
}
func addOpPutEncryptionConfigValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpPutEncryptionConfig{}, middleware.After)
}
func addOpPutResourcePolicyValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpPutResourcePolicy{}, middleware.After)
}
func addOpPutTelemetryRecordsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpPutTelemetryRecords{}, middleware.After)
}
func addOpPutTraceSegmentsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpPutTraceSegments{}, middleware.After)
}
func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpTagResource{}, middleware.After)
}
func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After)
}
func addOpUpdateSamplingRuleValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpUpdateSamplingRule{}, middleware.After)
}
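// exampleAddValidation is an illustrative sketch, not part of the generated
// validators: it shows how one of the helpers above attaches an input
// validator to a middleware stack. The stack id and the nil request
// constructor are placeholders.
func exampleAddValidation() error {
	stack := middleware.NewStack("PutTraceSegments", nil)
	return addOpPutTraceSegmentsValidationMiddleware(stack)
}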
func validateSamplingRule(v *types.SamplingRule) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "SamplingRule"}
if v.ResourceARN == nil {
invalidParams.Add(smithy.NewErrParamRequired("ResourceARN"))
}
if v.ServiceName == nil {
invalidParams.Add(smithy.NewErrParamRequired("ServiceName"))
}
if v.ServiceType == nil {
invalidParams.Add(smithy.NewErrParamRequired("ServiceType"))
}
if v.Host == nil {
invalidParams.Add(smithy.NewErrParamRequired("Host"))
}
if v.HTTPMethod == nil {
invalidParams.Add(smithy.NewErrParamRequired("HTTPMethod"))
}
if v.URLPath == nil {
invalidParams.Add(smithy.NewErrParamRequired("URLPath"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
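// exampleValidateSamplingRule is an illustrative sketch, not part of the
// generated validators: it shows that validateSamplingRule above reports every
// missing required member in a single smithy.InvalidParamsError.
func exampleValidateSamplingRule() {
	err := validateSamplingRule(&types.SamplingRule{})
	if invalidParams, ok := err.(smithy.InvalidParamsError); ok {
		// Six required members are reported as missing: ResourceARN,
		// ServiceName, ServiceType, Host, HTTPMethod, and URLPath.
		fmt.Println(invalidParams.Len(), invalidParams.Error())
	}
}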
func validateSamplingStatisticsDocument(v *types.SamplingStatisticsDocument) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "SamplingStatisticsDocument"}
if v.RuleName == nil {
invalidParams.Add(smithy.NewErrParamRequired("RuleName"))
}
if v.ClientID == nil {
invalidParams.Add(smithy.NewErrParamRequired("ClientID"))
}
if v.Timestamp == nil {
invalidParams.Add(smithy.NewErrParamRequired("Timestamp"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateSamplingStatisticsDocumentList(v []types.SamplingStatisticsDocument) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "SamplingStatisticsDocumentList"}
for i := range v {
if err := validateSamplingStatisticsDocument(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateTag(v *types.Tag) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "Tag"}
if v.Key == nil {
invalidParams.Add(smithy.NewErrParamRequired("Key"))
}
if v.Value == nil {
invalidParams.Add(smithy.NewErrParamRequired("Value"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateTagList(v []types.Tag) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "TagList"}
for i := range v {
if err := validateTag(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateTelemetryRecord(v *types.TelemetryRecord) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "TelemetryRecord"}
if v.Timestamp == nil {
invalidParams.Add(smithy.NewErrParamRequired("Timestamp"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateTelemetryRecordList(v []types.TelemetryRecord) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "TelemetryRecordList"}
for i := range v {
if err := validateTelemetryRecord(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpBatchGetTracesInput(v *BatchGetTracesInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "BatchGetTracesInput"}
if v.TraceIds == nil {
invalidParams.Add(smithy.NewErrParamRequired("TraceIds"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpCreateGroupInput(v *CreateGroupInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "CreateGroupInput"}
if v.GroupName == nil {
invalidParams.Add(smithy.NewErrParamRequired("GroupName"))
}
if v.Tags != nil {
if err := validateTagList(v.Tags); err != nil {
invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpCreateSamplingRuleInput(v *CreateSamplingRuleInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "CreateSamplingRuleInput"}
if v.SamplingRule == nil {
invalidParams.Add(smithy.NewErrParamRequired("SamplingRule"))
} else if v.SamplingRule != nil {
if err := validateSamplingRule(v.SamplingRule); err != nil {
invalidParams.AddNested("SamplingRule", err.(smithy.InvalidParamsError))
}
}
if v.Tags != nil {
if err := validateTagList(v.Tags); err != nil {
invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpDeleteResourcePolicyInput(v *DeleteResourcePolicyInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "DeleteResourcePolicyInput"}
if v.PolicyName == nil {
invalidParams.Add(smithy.NewErrParamRequired("PolicyName"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpGetInsightEventsInput(v *GetInsightEventsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "GetInsightEventsInput"}
if v.InsightId == nil {
invalidParams.Add(smithy.NewErrParamRequired("InsightId"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpGetInsightImpactGraphInput(v *GetInsightImpactGraphInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "GetInsightImpactGraphInput"}
if v.InsightId == nil {
invalidParams.Add(smithy.NewErrParamRequired("InsightId"))
}
if v.StartTime == nil {
invalidParams.Add(smithy.NewErrParamRequired("StartTime"))
}
if v.EndTime == nil {
invalidParams.Add(smithy.NewErrParamRequired("EndTime"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpGetInsightInput(v *GetInsightInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "GetInsightInput"}
if v.InsightId == nil {
invalidParams.Add(smithy.NewErrParamRequired("InsightId"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpGetInsightSummariesInput(v *GetInsightSummariesInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "GetInsightSummariesInput"}
if v.StartTime == nil {
invalidParams.Add(smithy.NewErrParamRequired("StartTime"))
}
if v.EndTime == nil {
invalidParams.Add(smithy.NewErrParamRequired("EndTime"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpGetSamplingTargetsInput(v *GetSamplingTargetsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "GetSamplingTargetsInput"}
if v.SamplingStatisticsDocuments == nil {
invalidParams.Add(smithy.NewErrParamRequired("SamplingStatisticsDocuments"))
} else if v.SamplingStatisticsDocuments != nil {
if err := validateSamplingStatisticsDocumentList(v.SamplingStatisticsDocuments); err != nil {
invalidParams.AddNested("SamplingStatisticsDocuments", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpGetServiceGraphInput(v *GetServiceGraphInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "GetServiceGraphInput"}
if v.StartTime == nil {
invalidParams.Add(smithy.NewErrParamRequired("StartTime"))
}
if v.EndTime == nil {
invalidParams.Add(smithy.NewErrParamRequired("EndTime"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpGetTimeSeriesServiceStatisticsInput(v *GetTimeSeriesServiceStatisticsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "GetTimeSeriesServiceStatisticsInput"}
if v.StartTime == nil {
invalidParams.Add(smithy.NewErrParamRequired("StartTime"))
}
if v.EndTime == nil {
invalidParams.Add(smithy.NewErrParamRequired("EndTime"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpGetTraceGraphInput(v *GetTraceGraphInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "GetTraceGraphInput"}
if v.TraceIds == nil {
invalidParams.Add(smithy.NewErrParamRequired("TraceIds"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpGetTraceSummariesInput(v *GetTraceSummariesInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "GetTraceSummariesInput"}
if v.StartTime == nil {
invalidParams.Add(smithy.NewErrParamRequired("StartTime"))
}
if v.EndTime == nil {
invalidParams.Add(smithy.NewErrParamRequired("EndTime"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "ListTagsForResourceInput"}
if v.ResourceARN == nil {
invalidParams.Add(smithy.NewErrParamRequired("ResourceARN"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpPutEncryptionConfigInput(v *PutEncryptionConfigInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "PutEncryptionConfigInput"}
if len(v.Type) == 0 {
invalidParams.Add(smithy.NewErrParamRequired("Type"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpPutResourcePolicyInput(v *PutResourcePolicyInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "PutResourcePolicyInput"}
if v.PolicyName == nil {
invalidParams.Add(smithy.NewErrParamRequired("PolicyName"))
}
if v.PolicyDocument == nil {
invalidParams.Add(smithy.NewErrParamRequired("PolicyDocument"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpPutTelemetryRecordsInput(v *PutTelemetryRecordsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "PutTelemetryRecordsInput"}
if v.TelemetryRecords == nil {
invalidParams.Add(smithy.NewErrParamRequired("TelemetryRecords"))
} else if v.TelemetryRecords != nil {
if err := validateTelemetryRecordList(v.TelemetryRecords); err != nil {
invalidParams.AddNested("TelemetryRecords", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpPutTraceSegmentsInput(v *PutTraceSegmentsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "PutTraceSegmentsInput"}
if v.TraceSegmentDocuments == nil {
invalidParams.Add(smithy.NewErrParamRequired("TraceSegmentDocuments"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpTagResourceInput(v *TagResourceInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"}
if v.ResourceARN == nil {
invalidParams.Add(smithy.NewErrParamRequired("ResourceARN"))
}
if v.Tags == nil {
invalidParams.Add(smithy.NewErrParamRequired("Tags"))
} else if v.Tags != nil {
if err := validateTagList(v.Tags); err != nil {
invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpUntagResourceInput(v *UntagResourceInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"}
if v.ResourceARN == nil {
invalidParams.Add(smithy.NewErrParamRequired("ResourceARN"))
}
if v.TagKeys == nil {
invalidParams.Add(smithy.NewErrParamRequired("TagKeys"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpUpdateSamplingRuleInput(v *UpdateSamplingRuleInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "UpdateSamplingRuleInput"}
if v.SamplingRuleUpdate == nil {
invalidParams.Add(smithy.NewErrParamRequired("SamplingRuleUpdate"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
// Code generated by smithy-go-codegen DO NOT EDIT.
package endpoints
import (
"github.com/aws/aws-sdk-go-v2/aws"
endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
"github.com/aws/smithy-go/logging"
"regexp"
)
// Options provides the endpoint resolver configuration options.
type Options struct {
// Logger is a logging implementation that log events should be sent to.
Logger logging.Logger
// LogDeprecated indicates that deprecated endpoints should be logged to the
// provided logger.
LogDeprecated bool
// ResolvedRegion is used to override the region to be resolved, rather than
// using the value passed to the ResolveEndpoint method. This value is used by
// the SDK to translate regions like fips-us-east-1 or us-east-1-fips to an
// alternative name. You must not set this value directly in your application.
ResolvedRegion string
// DisableHTTPS informs the resolver to return an endpoint that does not use the
// HTTPS scheme.
DisableHTTPS bool
// UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
UseDualStackEndpoint aws.DualStackEndpointState
// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
UseFIPSEndpoint aws.FIPSEndpointState
}
func (o Options) GetResolvedRegion() string {
return o.ResolvedRegion
}
func (o Options) GetDisableHTTPS() bool {
return o.DisableHTTPS
}
func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
return o.UseDualStackEndpoint
}
func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
return o.UseFIPSEndpoint
}
func transformToSharedOptions(options Options) endpoints.Options {
return endpoints.Options{
Logger: options.Logger,
LogDeprecated: options.LogDeprecated,
ResolvedRegion: options.ResolvedRegion,
DisableHTTPS: options.DisableHTTPS,
UseDualStackEndpoint: options.UseDualStackEndpoint,
UseFIPSEndpoint: options.UseFIPSEndpoint,
}
}
// Resolver is the XRay endpoint resolver.
type Resolver struct {
partitions endpoints.Partitions
}
// ResolveEndpoint resolves the service endpoint for the given region and options
func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
if len(region) == 0 {
return endpoint, &aws.MissingRegionError{}
}
opt := transformToSharedOptions(options)
return r.partitions.ResolveEndpoint(region, opt)
}
// New returns a new Resolver
func New() *Resolver {
return &Resolver{
partitions: defaultPartitions,
}
}
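// exampleResolveFIPSEndpoint is an illustrative sketch, not part of the
// generated resolver: it shows how New and ResolveEndpoint above can be used
// directly to resolve a FIPS endpoint for a hypothetical region.
func exampleResolveFIPSEndpoint() (aws.Endpoint, error) {
	resolver := New()
	// For the aws partition this resolves to
	// https://xray-fips.us-east-1.amazonaws.com.
	return resolver.ResolveEndpoint("us-east-1", Options{
		UseFIPSEndpoint: aws.FIPSEndpointStateEnabled,
	})
}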
var partitionRegexp = struct {
Aws *regexp.Regexp
AwsCn *regexp.Regexp
AwsIso *regexp.Regexp
AwsIsoB *regexp.Regexp
AwsIsoE *regexp.Regexp
AwsIsoF *regexp.Regexp
AwsUsGov *regexp.Regexp
}{
Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"),
AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
}
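// examplePartitionMatch is an illustrative sketch, not part of the generated
// endpoint metadata: it shows how the partition regular expressions above
// classify region names.
func examplePartitionMatch() {
	_ = partitionRegexp.Aws.MatchString("eu-west-1")          // true
	_ = partitionRegexp.AwsCn.MatchString("cn-north-1")       // true
	_ = partitionRegexp.AwsUsGov.MatchString("us-gov-west-1") // true
	_ = partitionRegexp.Aws.MatchString("us-gov-west-1")      // false; AwsUsGov matches instead
}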
var defaultPartitions = endpoints.Partitions{
{
ID: "aws",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
{
Variant: endpoints.DualStackVariant,
}: {
Hostname: "xray.{region}.api.aws",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.{region}.amazonaws.com",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
}: {
Hostname: "xray-fips.{region}.api.aws",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: 0,
}: {
Hostname: "xray.{region}.amazonaws.com",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
},
RegionRegex: partitionRegexp.Aws,
IsRegionalized: true,
Endpoints: endpoints.Endpoints{
endpoints.EndpointKey{
Region: "af-south-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-east-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-northeast-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-northeast-2",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-northeast-3",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-south-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-south-2",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-southeast-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-southeast-2",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-southeast-3",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-southeast-4",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ca-central-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-central-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-central-2",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-north-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-south-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-south-2",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-west-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-west-2",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-west-3",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "fips-us-east-1",
}: endpoints.Endpoint{
Hostname: "xray-fips.us-east-1.amazonaws.com",
CredentialScope: endpoints.CredentialScope{
Region: "us-east-1",
},
Deprecated: aws.TrueTernary,
},
endpoints.EndpointKey{
Region: "fips-us-east-2",
}: endpoints.Endpoint{
Hostname: "xray-fips.us-east-2.amazonaws.com",
CredentialScope: endpoints.CredentialScope{
Region: "us-east-2",
},
Deprecated: aws.TrueTernary,
},
endpoints.EndpointKey{
Region: "fips-us-west-1",
}: endpoints.Endpoint{
Hostname: "xray-fips.us-west-1.amazonaws.com",
CredentialScope: endpoints.CredentialScope{
Region: "us-west-1",
},
Deprecated: aws.TrueTernary,
},
endpoints.EndpointKey{
Region: "fips-us-west-2",
}: endpoints.Endpoint{
Hostname: "xray-fips.us-west-2.amazonaws.com",
CredentialScope: endpoints.CredentialScope{
Region: "us-west-2",
},
Deprecated: aws.TrueTernary,
},
endpoints.EndpointKey{
Region: "me-central-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "me-south-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "sa-east-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "us-east-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "us-east-1",
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.us-east-1.amazonaws.com",
},
endpoints.EndpointKey{
Region: "us-east-2",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "us-east-2",
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.us-east-2.amazonaws.com",
},
endpoints.EndpointKey{
Region: "us-west-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "us-west-1",
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.us-west-1.amazonaws.com",
},
endpoints.EndpointKey{
Region: "us-west-2",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "us-west-2",
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.us-west-2.amazonaws.com",
},
},
},
{
ID: "aws-cn",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
{
Variant: endpoints.DualStackVariant,
}: {
Hostname: "xray.{region}.api.amazonwebservices.com.cn",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.{region}.amazonaws.com.cn",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
}: {
Hostname: "xray-fips.{region}.api.amazonwebservices.com.cn",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: 0,
}: {
Hostname: "xray.{region}.amazonaws.com.cn",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
},
RegionRegex: partitionRegexp.AwsCn,
IsRegionalized: true,
Endpoints: endpoints.Endpoints{
endpoints.EndpointKey{
Region: "cn-north-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "cn-northwest-1",
}: endpoints.Endpoint{},
},
},
{
ID: "aws-iso",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
{
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.{region}.c2s.ic.gov",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: 0,
}: {
Hostname: "xray.{region}.c2s.ic.gov",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
},
RegionRegex: partitionRegexp.AwsIso,
IsRegionalized: true,
},
{
ID: "aws-iso-b",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
{
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.{region}.sc2s.sgov.gov",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: 0,
}: {
Hostname: "xray.{region}.sc2s.sgov.gov",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
},
RegionRegex: partitionRegexp.AwsIsoB,
IsRegionalized: true,
},
{
ID: "aws-iso-e",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
{
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.{region}.cloud.adc-e.uk",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: 0,
}: {
Hostname: "xray.{region}.cloud.adc-e.uk",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
},
RegionRegex: partitionRegexp.AwsIsoE,
IsRegionalized: true,
},
{
ID: "aws-iso-f",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
{
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.{region}.csp.hci.ic.gov",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: 0,
}: {
Hostname: "xray.{region}.csp.hci.ic.gov",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
},
RegionRegex: partitionRegexp.AwsIsoF,
IsRegionalized: true,
},
{
ID: "aws-us-gov",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
{
Variant: endpoints.DualStackVariant,
}: {
Hostname: "xray.{region}.api.aws",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.{region}.amazonaws.com",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
}: {
Hostname: "xray-fips.{region}.api.aws",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
{
Variant: 0,
}: {
Hostname: "xray.{region}.amazonaws.com",
Protocols: []string{"https"},
SignatureVersions: []string{"v4"},
},
},
RegionRegex: partitionRegexp.AwsUsGov,
IsRegionalized: true,
Endpoints: endpoints.Endpoints{
endpoints.EndpointKey{
Region: "fips-us-gov-east-1",
}: endpoints.Endpoint{
Hostname: "xray-fips.us-gov-east-1.amazonaws.com",
CredentialScope: endpoints.CredentialScope{
Region: "us-gov-east-1",
},
Deprecated: aws.TrueTernary,
},
endpoints.EndpointKey{
Region: "fips-us-gov-west-1",
}: endpoints.Endpoint{
Hostname: "xray-fips.us-gov-west-1.amazonaws.com",
CredentialScope: endpoints.CredentialScope{
Region: "us-gov-west-1",
},
Deprecated: aws.TrueTernary,
},
endpoints.EndpointKey{
Region: "us-gov-east-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "us-gov-east-1",
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.us-gov-east-1.amazonaws.com",
},
endpoints.EndpointKey{
Region: "us-gov-west-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "us-gov-west-1",
Variant: endpoints.FIPSVariant,
}: {
Hostname: "xray-fips.us-gov-west-1.amazonaws.com",
},
},
},
}
| 486 |
aws-sdk-go-v2 | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package endpoints
import (
"testing"
)
func TestRegexCompile(t *testing.T) {
_ = defaultPartitions
}
| 12 |
aws-sdk-go-v2 | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package types
type EncryptionStatus string
// Enum values for EncryptionStatus
const (
EncryptionStatusUpdating EncryptionStatus = "UPDATING"
EncryptionStatusActive EncryptionStatus = "ACTIVE"
)
// Values returns all known values for EncryptionStatus. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
func (EncryptionStatus) Values() []EncryptionStatus {
return []EncryptionStatus{
"UPDATING",
"ACTIVE",
}
}
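// Illustrative sketch, not generated code: enum values are open string sets,
// so callers usually switch on the named constants and treat anything else as
// a value added after this client was generated.
func exampleEncryptionReady(status EncryptionStatus) bool {
	switch status {
	case EncryptionStatusActive:
		return true
	case EncryptionStatusUpdating:
		// Data may still be encrypted with a mix of old and new settings.
		return false
	default:
		// Unknown value introduced by a newer service version.
		return false
	}
}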
type EncryptionType string
// Enum values for EncryptionType
const (
EncryptionTypeNone EncryptionType = "NONE"
EncryptionTypeKms EncryptionType = "KMS"
)
// Values returns all known values for EncryptionType. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
func (EncryptionType) Values() []EncryptionType {
return []EncryptionType{
"NONE",
"KMS",
}
}
type InsightCategory string
// Enum values for InsightCategory
const (
InsightCategoryFault InsightCategory = "FAULT"
)
// Values returns all known values for InsightCategory. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
func (InsightCategory) Values() []InsightCategory {
return []InsightCategory{
"FAULT",
}
}
type InsightState string
// Enum values for InsightState
const (
InsightStateActive InsightState = "ACTIVE"
InsightStateClosed InsightState = "CLOSED"
)
// Values returns all known values for InsightState. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
func (InsightState) Values() []InsightState {
return []InsightState{
"ACTIVE",
"CLOSED",
}
}
type SamplingStrategyName string
// Enum values for SamplingStrategyName
const (
SamplingStrategyNamePartialScan SamplingStrategyName = "PartialScan"
SamplingStrategyNameFixedRate SamplingStrategyName = "FixedRate"
)
// Values returns all known values for SamplingStrategyName. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
func (SamplingStrategyName) Values() []SamplingStrategyName {
return []SamplingStrategyName{
"PartialScan",
"FixedRate",
}
}
type TimeRangeType string
// Enum values for TimeRangeType
const (
TimeRangeTypeTraceId TimeRangeType = "TraceId"
TimeRangeTypeEvent TimeRangeType = "Event"
)
// Values returns all known values for TimeRangeType. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
func (TimeRangeType) Values() []TimeRangeType {
return []TimeRangeType{
"TraceId",
"Event",
}
}
| 110 |
aws-sdk-go-v2 | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package types
import (
"fmt"
smithy "github.com/aws/smithy-go"
)
// A policy revision id was provided which does not match the latest policy
// revision. This exception is also returned if a policy revision id of 0 is provided
// via PutResourcePolicy and a policy with the same name already exists.
type InvalidPolicyRevisionIdException struct {
Message *string
ErrorCodeOverride *string
noSmithyDocumentSerde
}
func (e *InvalidPolicyRevisionIdException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
}
func (e *InvalidPolicyRevisionIdException) ErrorMessage() string {
if e.Message == nil {
return ""
}
return *e.Message
}
func (e *InvalidPolicyRevisionIdException) ErrorCode() string {
if e == nil || e.ErrorCodeOverride == nil {
return "InvalidPolicyRevisionIdException"
}
return *e.ErrorCodeOverride
}
func (e *InvalidPolicyRevisionIdException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The request is missing required parameters or has invalid parameters.
type InvalidRequestException struct {
Message *string
ErrorCodeOverride *string
noSmithyDocumentSerde
}
func (e *InvalidRequestException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
}
func (e *InvalidRequestException) ErrorMessage() string {
if e.Message == nil {
return ""
}
return *e.Message
}
func (e *InvalidRequestException) ErrorCode() string {
if e == nil || e.ErrorCodeOverride == nil {
return "InvalidRequestException"
}
return *e.ErrorCodeOverride
}
func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The provided resource policy would prevent the caller of this request from
// calling PutResourcePolicy in the future.
type LockoutPreventionException struct {
Message *string
ErrorCodeOverride *string
noSmithyDocumentSerde
}
func (e *LockoutPreventionException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
}
func (e *LockoutPreventionException) ErrorMessage() string {
if e.Message == nil {
return ""
}
return *e.Message
}
func (e *LockoutPreventionException) ErrorCode() string {
if e == nil || e.ErrorCodeOverride == nil {
return "LockoutPreventionException"
}
return *e.ErrorCodeOverride
}
func (e *LockoutPreventionException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Invalid policy document provided in request.
type MalformedPolicyDocumentException struct {
Message *string
ErrorCodeOverride *string
noSmithyDocumentSerde
}
func (e *MalformedPolicyDocumentException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
}
func (e *MalformedPolicyDocumentException) ErrorMessage() string {
if e.Message == nil {
return ""
}
return *e.Message
}
func (e *MalformedPolicyDocumentException) ErrorCode() string {
if e == nil || e.ErrorCodeOverride == nil {
return "MalformedPolicyDocumentException"
}
return *e.ErrorCodeOverride
}
func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Exceeded the maximum number of resource policies for a target Amazon Web
// Services account.
type PolicyCountLimitExceededException struct {
Message *string
ErrorCodeOverride *string
noSmithyDocumentSerde
}
func (e *PolicyCountLimitExceededException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
}
func (e *PolicyCountLimitExceededException) ErrorMessage() string {
if e.Message == nil {
return ""
}
return *e.Message
}
func (e *PolicyCountLimitExceededException) ErrorCode() string {
if e == nil || e.ErrorCodeOverride == nil {
return "PolicyCountLimitExceededException"
}
return *e.ErrorCodeOverride
}
func (e *PolicyCountLimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Exceeded the maximum size for a resource policy.
type PolicySizeLimitExceededException struct {
Message *string
ErrorCodeOverride *string
noSmithyDocumentSerde
}
func (e *PolicySizeLimitExceededException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
}
func (e *PolicySizeLimitExceededException) ErrorMessage() string {
if e.Message == nil {
return ""
}
return *e.Message
}
func (e *PolicySizeLimitExceededException) ErrorCode() string {
if e == nil || e.ErrorCodeOverride == nil {
return "PolicySizeLimitExceededException"
}
return *e.ErrorCodeOverride
}
func (e *PolicySizeLimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The resource was not found. Verify that the name or Amazon Resource Name (ARN)
// of the resource is correct.
type ResourceNotFoundException struct {
Message *string
ErrorCodeOverride *string
ResourceName *string
noSmithyDocumentSerde
}
func (e *ResourceNotFoundException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
}
func (e *ResourceNotFoundException) ErrorMessage() string {
if e.Message == nil {
return ""
}
return *e.Message
}
func (e *ResourceNotFoundException) ErrorCode() string {
if e == nil || e.ErrorCodeOverride == nil {
return "ResourceNotFoundException"
}
return *e.ErrorCodeOverride
}
func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
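// Illustrative sketch, not generated code: callers typically detect a modeled
// error type to branch on it. errors.As from the standard library is the more
// robust choice when errors may be wrapped; the direct type assertion below is
// a minimal, import-free variant.
func exampleMissingResource(err error) (resourceName string, notFound bool) {
	if rnf, ok := err.(*ResourceNotFoundException); ok {
		if rnf.ResourceName != nil {
			resourceName = *rnf.ResourceName
		}
		return resourceName, true
	}
	return "", false
}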
// You have reached the maximum number of sampling rules.
type RuleLimitExceededException struct {
Message *string
ErrorCodeOverride *string
noSmithyDocumentSerde
}
func (e *RuleLimitExceededException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
}
func (e *RuleLimitExceededException) ErrorMessage() string {
if e.Message == nil {
return ""
}
return *e.Message
}
func (e *RuleLimitExceededException) ErrorCode() string {
if e == nil || e.ErrorCodeOverride == nil {
return "RuleLimitExceededException"
}
return *e.ErrorCodeOverride
}
func (e *RuleLimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The request exceeds the maximum number of requests per second.
type ThrottledException struct {
Message *string
ErrorCodeOverride *string
noSmithyDocumentSerde
}
func (e *ThrottledException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
}
func (e *ThrottledException) ErrorMessage() string {
if e.Message == nil {
return ""
}
return *e.Message
}
func (e *ThrottledException) ErrorCode() string {
if e == nil || e.ErrorCodeOverride == nil {
return "ThrottledException"
}
return *e.ErrorCodeOverride
}
func (e *ThrottledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// You have exceeded the maximum number of tags you can apply to this resource.
type TooManyTagsException struct {
Message *string
ErrorCodeOverride *string
ResourceName *string
noSmithyDocumentSerde
}
func (e *TooManyTagsException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
}
func (e *TooManyTagsException) ErrorMessage() string {
if e.Message == nil {
return ""
}
return *e.Message
}
func (e *TooManyTagsException) ErrorCode() string {
if e == nil || e.ErrorCodeOverride == nil {
return "TooManyTagsException"
}
return *e.ErrorCodeOverride
}
func (e *TooManyTagsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
| 278 |
aws-sdk-go-v2 | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package types
import (
smithydocument "github.com/aws/smithy-go/document"
"time"
)
// An alias for an edge.
type Alias struct {
// The canonical name of the alias.
Name *string
// A list of names for the alias, including the canonical name.
Names []string
// The type of the alias.
Type *string
noSmithyDocumentSerde
}
// Value of a segment annotation. Has one of three value types: Number, Boolean,
// or String.
//
// The following types satisfy this interface:
//
// AnnotationValueMemberBooleanValue
// AnnotationValueMemberNumberValue
// AnnotationValueMemberStringValue
type AnnotationValue interface {
isAnnotationValue()
}
// Value for a Boolean annotation.
type AnnotationValueMemberBooleanValue struct {
Value bool
noSmithyDocumentSerde
}
func (*AnnotationValueMemberBooleanValue) isAnnotationValue() {}
// Value for a Number annotation.
type AnnotationValueMemberNumberValue struct {
Value float64
noSmithyDocumentSerde
}
func (*AnnotationValueMemberNumberValue) isAnnotationValue() {}
// Value for a String annotation.
type AnnotationValueMemberStringValue struct {
Value string
noSmithyDocumentSerde
}
func (*AnnotationValueMemberStringValue) isAnnotationValue() {}
// The service within the service graph that has anomalously high fault rates.
type AnomalousService struct {
//
ServiceId *ServiceId
noSmithyDocumentSerde
}
// A list of Availability Zones corresponding to the segments in a trace.
type AvailabilityZoneDetail struct {
// The name of a corresponding Availability Zone.
Name *string
noSmithyDocumentSerde
}
type BackendConnectionErrors struct {
//
ConnectionRefusedCount *int32
//
HTTPCode4XXCount *int32
//
HTTPCode5XXCount *int32
//
OtherCount *int32
//
TimeoutCount *int32
//
UnknownHostCount *int32
noSmithyDocumentSerde
}
// Information about a connection between two services. An edge can be a
// synchronous connection, such as a typical call between client and service, or an
// asynchronous link, such as a Lambda function which retrieves an event from an
// SNS queue.
type Edge struct {
// Aliases for the edge.
Aliases []Alias
// Describes an asynchronous connection, with a value of link .
EdgeType *string
// The end time of the last segment on the edge.
EndTime *time.Time
// A histogram that maps the spread of event age when received by consumers. Age
// is calculated each time an event is received. Only populated when EdgeType is
// link .
ReceivedEventAgeHistogram []HistogramEntry
// Identifier of the edge. Unique within a service map.
ReferenceId *int32
// A histogram that maps the spread of client response times on an edge. Only
// populated for synchronous edges.
ResponseTimeHistogram []HistogramEntry
// The start time of the first segment on the edge.
StartTime *time.Time
// Response statistics for segments on the edge.
SummaryStatistics *EdgeStatistics
noSmithyDocumentSerde
}
// Response statistics for an edge.
type EdgeStatistics struct {
// Information about requests that failed with a 4xx Client Error status code.
ErrorStatistics *ErrorStatistics
// Information about requests that failed with a 5xx Server Error status code.
FaultStatistics *FaultStatistics
// The number of requests that completed with a 2xx Success status code.
OkCount *int64
// The total number of completed requests.
TotalCount *int64
// The aggregate response time of completed requests.
TotalResponseTime *float64
noSmithyDocumentSerde
}
// A configuration document that specifies encryption configuration settings.
type EncryptionConfig struct {
// The ID of the KMS key used for encryption, if applicable.
KeyId *string
// The encryption status. While the status is UPDATING , X-Ray may encrypt data
// with a combination of the new and old settings.
Status EncryptionStatus
// The type of encryption. Set to KMS for encryption with KMS keys. Set to NONE
// for default encryption.
Type EncryptionType
noSmithyDocumentSerde
}
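// Illustrative sketch, not generated code: a caller inspecting an
// EncryptionConfig usually treats Type NONE as default encryption and waits
// while Status is UPDATING before relying on the new settings.
func exampleUsesCustomerManagedKey(cfg *EncryptionConfig) bool {
	if cfg == nil {
		return false
	}
	return cfg.Type == EncryptionTypeKms && cfg.KeyId != nil
}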
// The root cause of a trace summary error.
type ErrorRootCause struct {
// A flag that denotes that the root cause impacts the trace client.
ClientImpacting *bool
// A list of services corresponding to an error. A service identifies a segment
// and it contains a name, account ID, type, and inferred flag.
Services []ErrorRootCauseService
noSmithyDocumentSerde
}
// A collection of segments and corresponding subsegments associated to a trace
// summary error.
type ErrorRootCauseEntity struct {
// The types and messages of the exceptions.
Exceptions []RootCauseException
// The name of the entity.
Name *string
// A flag that denotes a remote subsegment.
Remote *bool
noSmithyDocumentSerde
}
// A collection of fields identifying the services in a trace summary error.
type ErrorRootCauseService struct {
// The account ID associated to the service.
AccountId *string
// The path of root cause entities found on the service.
EntityPath []ErrorRootCauseEntity
// A Boolean value indicating if the service is inferred from the trace.
Inferred *bool
// The service name.
Name *string
// A collection of associated service names.
Names []string
// The type associated to the service.
Type *string
noSmithyDocumentSerde
}
// Information about requests that failed with a 4xx Client Error status code.
type ErrorStatistics struct {
// The number of requests that failed with untracked 4xx Client Error status codes.
OtherCount *int64
// The number of requests that failed with a 419 throttling status code.
ThrottleCount *int64
// The total number of requests that failed with a 4xx Client Error status code.
TotalCount *int64
noSmithyDocumentSerde
}
// The root cause information for a trace summary fault.
type FaultRootCause struct {
// A flag that denotes that the root cause impacts the trace client.
ClientImpacting *bool
// A list of corresponding services. A service identifies a segment and it
// contains a name, account ID, type, and inferred flag.
Services []FaultRootCauseService
noSmithyDocumentSerde
}
// A collection of segments and corresponding subsegments associated to a trace
// summary fault error.
type FaultRootCauseEntity struct {
// The types and messages of the exceptions.
Exceptions []RootCauseException
// The name of the entity.
Name *string
// A flag that denotes a remote subsegment.
Remote *bool
noSmithyDocumentSerde
}
// A collection of fields identifying the services in a trace summary fault.
type FaultRootCauseService struct {
// The account ID associated to the service.
AccountId *string
// The path of root cause entities found on the service.
EntityPath []FaultRootCauseEntity
// A Boolean value indicating if the service is inferred from the trace.
Inferred *bool
// The service name.
Name *string
// A collection of associated service names.
Names []string
// The type associated to the service.
Type *string
noSmithyDocumentSerde
}
// Information about requests that failed with a 5xx Server Error status code.
type FaultStatistics struct {
// The number of requests that failed with untracked 5xx Server Error status codes.
OtherCount *int64
// The total number of requests that failed with a 5xx Server Error status code.
TotalCount *int64
noSmithyDocumentSerde
}
// The predicted high and low fault count. This is used to determine if a service
// has become anomalous and if an insight should be created.
type ForecastStatistics struct {
// The upper limit of fault counts for a service.
FaultCountHigh *int64
// The lower limit of fault counts for a service.
FaultCountLow *int64
noSmithyDocumentSerde
}
// Details and metadata for a group.
type Group struct {
// The filter expression defining the parameters to include traces.
FilterExpression *string
// The Amazon Resource Name (ARN) of the group generated based on the GroupName.
GroupARN *string
// The unique case-sensitive name of the group.
GroupName *string
// The structure containing configurations related to insights.
// - The InsightsEnabled boolean can be set to true to enable insights for the
// group or false to disable insights for the group.
// - The NotificationsEnabled boolean can be set to true to enable insights
// notifications through Amazon EventBridge for the group.
InsightsConfiguration *InsightsConfiguration
noSmithyDocumentSerde
}
// Details for a group without metadata.
type GroupSummary struct {
// The filter expression defining the parameters to include traces.
FilterExpression *string
// The ARN of the group generated based on the GroupName.
GroupARN *string
// The unique case-sensitive name of the group.
GroupName *string
// The structure containing configurations related to insights.
// - The InsightsEnabled boolean can be set to true to enable insights for the
// group or false to disable insights for the group.
// - The NotificationsEnabled boolean can be set to true to enable insights
// notifications. Notifications can only be enabled on a group with InsightsEnabled
// set to true.
InsightsConfiguration *InsightsConfiguration
noSmithyDocumentSerde
}
// An entry in a histogram for a statistic. A histogram maps the range of observed
// values on the X axis, and the prevalence of each value on the Y axis.
type HistogramEntry struct {
// The prevalence of the entry.
Count int32
// The value of the entry.
Value float64
noSmithyDocumentSerde
}
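// Illustrative sketch, not generated code: each HistogramEntry pairs an
// observed value with how often it occurred, so a weighted mean over a
// histogram follows directly from Count and Value.
func exampleHistogramMean(entries []HistogramEntry) float64 {
	var total, weighted float64
	for _, e := range entries {
		total += float64(e.Count)
		weighted += float64(e.Count) * e.Value
	}
	if total == 0 {
		return 0
	}
	return weighted / total
}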
// Information about an HTTP request.
type Http struct {
// The IP address of the requestor.
ClientIp *string
// The request method.
HttpMethod *string
// The response status.
HttpStatus *int32
// The request URL.
HttpURL *string
// The request's user agent string.
UserAgent *string
noSmithyDocumentSerde
}
// When fault rates go outside of the expected range, X-Ray creates an insight.
// Insights track emergent issues within your applications.
type Insight struct {
// The categories that label and describe the type of insight.
Categories []InsightCategory
// The impact statistics of the client side service. This includes the number of
// requests to the client service and whether the requests were faults or okay.
ClientRequestImpactStatistics *RequestImpactStatistics
// The time, in Unix seconds, at which the insight ended.
EndTime *time.Time
// The Amazon Resource Name (ARN) of the group that the insight belongs to.
GroupARN *string
// The name of the group that the insight belongs to.
GroupName *string
// The insight's unique identifier.
InsightId *string
//
RootCauseServiceId *ServiceId
// The impact statistics of the root cause service. This includes the number of
// requests to the client service and whether the requests were faults or okay.
RootCauseServiceRequestImpactStatistics *RequestImpactStatistics
// The time, in Unix seconds, at which the insight began.
StartTime *time.Time
// The current state of the insight.
State InsightState
// A brief description of the insight.
Summary *string
// The services within the insight that are most impacted by the incident.
TopAnomalousServices []AnomalousService
noSmithyDocumentSerde
}
// X-Ray reevaluates insights periodically until they are resolved, and records
// each intermediate state in an event. You can review incident events in the
// Impact Timeline on the Inspect page in the X-Ray console.
type InsightEvent struct {
// The impact statistics of the client side service. This includes the number of
// requests to the client service and whether the requests were faults or okay.
ClientRequestImpactStatistics *RequestImpactStatistics
// The time, in Unix seconds, at which the event was recorded.
EventTime *time.Time
// The impact statistics of the root cause service. This includes the number of
// requests to the client service and whether the requests were faults or okay.
RootCauseServiceRequestImpactStatistics *RequestImpactStatistics
// A brief description of the event.
Summary *string
// The services during the event that are most impacted by the incident.
TopAnomalousServices []AnomalousService
noSmithyDocumentSerde
}
// The connection between two services in an insight impact graph.
type InsightImpactGraphEdge struct {
// Identifier of the edge. Unique within a service map.
ReferenceId *int32
noSmithyDocumentSerde
}
// Information about an application that processed requests, users that made
// requests, or downstream services, resources, and applications that an
// application used.
type InsightImpactGraphService struct {
// Identifier of the Amazon Web Services account in which the service runs.
AccountId *string
// Connections to downstream services.
Edges []InsightImpactGraphEdge
// The canonical name of the service.
Name *string
// A list of names for the service, including the canonical name.
Names []string
// Identifier for the service. Unique within the service map.
ReferenceId *int32
// The type of the service.
// - Amazon Web Services Resource - The type of an Amazon Web Services resource.
// For example, AWS::EC2::Instance for an application running on Amazon EC2 or
// AWS::DynamoDB::Table for an Amazon DynamoDB table that the application used.
// - Amazon Web Services Service - The type of an Amazon Web Services service.
// For example, AWS::DynamoDB for downstream calls to Amazon DynamoDB that didn't
// target a specific table.
// - remote - A downstream service of indeterminate type.
Type *string
noSmithyDocumentSerde
}
// The structure containing configurations related to insights.
type InsightsConfiguration struct {
// Set the InsightsEnabled value to true to enable insights or false to disable
// insights.
InsightsEnabled *bool
// Set the NotificationsEnabled value to true to enable insights notifications.
// Notifications can only be enabled on a group with InsightsEnabled set to true.
NotificationsEnabled *bool
noSmithyDocumentSerde
}
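// Illustrative sketch, not generated code: both fields are *bool, so callers
// typically take the address of a local value (or use a helper such as
// aws.Bool) when enabling insights and notifications together.
func exampleInsightsConfiguration(enableNotifications bool) *InsightsConfiguration {
	enabled := true
	return &InsightsConfiguration{
		InsightsEnabled:      &enabled,
		NotificationsEnabled: &enableNotifications,
	}
}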
// Information that describes an insight.
type InsightSummary struct {
// The categories that label and describe the type of insight.
Categories []InsightCategory
// The impact statistics of the client side service. This includes the number of
// requests to the client service and whether the requests were faults or okay.
ClientRequestImpactStatistics *RequestImpactStatistics
// The time, in Unix seconds, at which the insight ended.
EndTime *time.Time
// The Amazon Resource Name (ARN) of the group that the insight belongs to.
GroupARN *string
// The name of the group that the insight belongs to.
GroupName *string
// The insight's unique identifier.
InsightId *string
// The time, in Unix seconds, that the insight was last updated.
LastUpdateTime *time.Time
//
RootCauseServiceId *ServiceId
// The impact statistics of the root cause service. This includes the number of
// requests to the client service and whether the requests were faults or okay.
RootCauseServiceRequestImpactStatistics *RequestImpactStatistics
// The time, in Unix seconds, at which the insight began.
StartTime *time.Time
// The current state of the insight.
State InsightState
// A brief description of the insight.
Summary *string
// The services within the insight that are most impacted by the incident.
TopAnomalousServices []AnomalousService
noSmithyDocumentSerde
}
// A list of EC2 instance IDs corresponding to the segments in a trace.
type InstanceIdDetail struct {
// The ID of a corresponding EC2 instance.
Id *string
noSmithyDocumentSerde
}
// Statistics that describe how the incident has impacted a service.
type RequestImpactStatistics struct {
// The number of requests that have resulted in a fault.
FaultCount *int64
// The number of successful requests.
OkCount *int64
// The total number of requests to the service.
TotalCount *int64
noSmithyDocumentSerde
}
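// Illustrative sketch, not generated code: a fault percentage for an impacted
// service can be derived from FaultCount and TotalCount when both are present
// and the total is non-zero.
func exampleFaultPercent(stats *RequestImpactStatistics) (percent float64, ok bool) {
	if stats == nil || stats.FaultCount == nil || stats.TotalCount == nil || *stats.TotalCount == 0 {
		return 0, false
	}
	return 100 * float64(*stats.FaultCount) / float64(*stats.TotalCount), true
}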
// A list of resource ARNs corresponding to the segments in a trace.
type ResourceARNDetail struct {
// The ARN of a corresponding resource.
ARN *string
noSmithyDocumentSerde
}
// A resource policy grants one or more Amazon Web Services services and accounts
// permissions to access X-Ray. Each resource policy is associated with a specific
// Amazon Web Services account.
type ResourcePolicy struct {
// When the policy was last updated, in Unix time seconds.
LastUpdatedTime *time.Time
// The resource policy document, which can be up to 5kb in size.
PolicyDocument *string
// The name of the resource policy. Must be unique within a specific Amazon Web
// Services account.
PolicyName *string
// Returns the current policy revision id for this policy name.
PolicyRevisionId *string
noSmithyDocumentSerde
}
// The root cause information for a response time warning.
type ResponseTimeRootCause struct {
// A flag that denotes that the root cause impacts the trace client.
ClientImpacting *bool
// A list of corresponding services. A service identifies a segment and contains a
// name, account ID, type, and inferred flag.
Services []ResponseTimeRootCauseService
noSmithyDocumentSerde
}
// A collection of segments and corresponding subsegments associated to a response
// time warning.
type ResponseTimeRootCauseEntity struct {
// The portion of the response time attributed to this entity.
Coverage *float64
// The name of the entity.
Name *string
// A flag that denotes a remote subsegment.
Remote *bool
noSmithyDocumentSerde
}
// A collection of fields identifying the service in a response time warning.
type ResponseTimeRootCauseService struct {
// The account ID associated to the service.
AccountId *string
// The path of root cause entities found on the service.
EntityPath []ResponseTimeRootCauseEntity
// A Boolean value indicating if the service is inferred from the trace.
Inferred *bool
// The service name.
Name *string
// A collection of associated service names.
Names []string
// The type associated to the service.
Type *string
noSmithyDocumentSerde
}
// The exception associated with a root cause.
type RootCauseException struct {
// The message of the exception.
Message *string
// The name of the exception.
Name *string
noSmithyDocumentSerde
}
// A sampling rule that services use to decide whether to instrument a request.
// Rule fields can match properties of the service, or properties of a request. The
// service can ignore rules that don't match its properties.
type SamplingRule struct {
// The percentage of matching requests to instrument, after the reservoir is
// exhausted.
//
// This member is required.
FixedRate float64
// Matches the HTTP method of a request.
//
// This member is required.
HTTPMethod *string
// Matches the hostname from a request URL.
//
// This member is required.
Host *string
// The priority of the sampling rule.
//
// This member is required.
Priority int32
// A fixed number of matching requests to instrument per second, prior to applying
// the fixed rate. The reservoir is not used directly by services, but applies to
// all services using the rule collectively.
//
// This member is required.
ReservoirSize int32
// Matches the ARN of the Amazon Web Services resource on which the service runs.
//
// This member is required.
ResourceARN *string
// Matches the name that the service uses to identify itself in segments.
//
// This member is required.
ServiceName *string
// Matches the origin that the service uses to identify its type in segments.
//
// This member is required.
ServiceType *string
// Matches the path from a request URL.
//
// This member is required.
URLPath *string
// The version of the sampling rule format ( 1 ).
//
// This member is required.
Version int32
// Matches attributes derived from the request.
Attributes map[string]string
// The ARN of the sampling rule. Specify a rule by either name or ARN, but not
// both.
RuleARN *string
// The name of the sampling rule. Specify a rule by either name or ARN, but not
// both.
RuleName *string
noSmithyDocumentSerde
}
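// Illustrative sketch, not generated code: every required matcher on a
// SamplingRule must be set; "*" is commonly used as a wildcard matcher. The
// rule name, rate, and priority below are assumptions chosen for the example.
func exampleSamplingRule() SamplingRule {
	str := func(s string) *string { return &s }
	return SamplingRule{
		RuleName:      str("example-rule"),
		Priority:      100,
		FixedRate:     0.05,
		ReservoirSize: 1,
		Version:       1,
		Host:          str("*"),
		HTTPMethod:    str("*"),
		URLPath:       str("*"),
		ServiceName:   str("*"),
		ServiceType:   str("*"),
		ResourceARN:   str("*"),
	}
}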
// A SamplingRule (https://docs.aws.amazon.com/xray/latest/api/API_SamplingRule.html)
// and its metadata.
type SamplingRuleRecord struct {
// When the rule was created.
CreatedAt *time.Time
// When the rule was last modified.
ModifiedAt *time.Time
// The sampling rule.
SamplingRule *SamplingRule
noSmithyDocumentSerde
}
// A document specifying changes to a sampling rule's configuration.
type SamplingRuleUpdate struct {
// Matches attributes derived from the request.
Attributes map[string]string
// The percentage of matching requests to instrument, after the reservoir is
// exhausted.
FixedRate *float64
// Matches the HTTP method of a request.
HTTPMethod *string
// Matches the hostname from a request URL.
Host *string
// The priority of the sampling rule.
Priority *int32
// A fixed number of matching requests to instrument per second, prior to applying
// the fixed rate. The reservoir is not used directly by services, but applies to
// all services using the rule collectively.
ReservoirSize *int32
// Matches the ARN of the Amazon Web Services resource on which the service runs.
ResourceARN *string
// The ARN of the sampling rule. Specify a rule by either name or ARN, but not
// both.
RuleARN *string
// The name of the sampling rule. Specify a rule by either name or ARN, but not
// both.
RuleName *string
// Matches the name that the service uses to identify itself in segments.
ServiceName *string
// Matches the origin that the service uses to identify its type in segments.
ServiceType *string
// Matches the path from a request URL.
URLPath *string
noSmithyDocumentSerde
}
// Request sampling results for a single rule from a service. Results are for the
// last 10 seconds unless the service has been assigned a longer reporting interval
// after a previous call to GetSamplingTargets (https://docs.aws.amazon.com/xray/latest/api/API_GetSamplingTargets.html)
// .
type SamplingStatisticsDocument struct {
// A unique identifier for the service in hexadecimal.
//
// This member is required.
ClientID *string
// The number of requests that matched the rule.
//
// This member is required.
RequestCount int32
// The name of the sampling rule.
//
// This member is required.
RuleName *string
// The number of requests recorded.
//
// This member is required.
SampledCount int32
// The current time.
//
// This member is required.
Timestamp *time.Time
// The number of requests recorded with borrowed reservoir quota.
BorrowCount int32
noSmithyDocumentSerde
}
// Aggregated request sampling data for a sampling rule across all services for a
// 10-second window.
type SamplingStatisticSummary struct {
// The number of requests recorded with borrowed reservoir quota.
BorrowCount int32
// The number of requests that matched the rule.
RequestCount int32
// The name of the sampling rule.
RuleName *string
// The number of requests recorded.
SampledCount int32
// The start time of the reporting window.
Timestamp *time.Time
noSmithyDocumentSerde
}
// The name and value of a sampling rule to apply to a trace summary.
type SamplingStrategy struct {
// The name of a sampling rule.
Name SamplingStrategyName
// The value of a sampling rule.
Value *float64
noSmithyDocumentSerde
}
// Temporary changes to a sampling rule configuration. To meet the global sampling
// target for a rule, X-Ray calculates a new reservoir for each service based on
// the recent sampling results of all services that called GetSamplingTargets (https://docs.aws.amazon.com/xray/latest/api/API_GetSamplingTargets.html)
// .
type SamplingTargetDocument struct {
// The percentage of matching requests to instrument, after the reservoir is
// exhausted.
FixedRate float64
// The number of seconds for the service to wait before getting sampling targets
// again.
Interval *int32
// The number of requests per second that X-Ray allocated for this service.
ReservoirQuota *int32
// When the reservoir quota expires.
ReservoirQuotaTTL *time.Time
// The name of the sampling rule.
RuleName *string
noSmithyDocumentSerde
}
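// Illustrative sketch, not generated code: a service applying a sampling
// target typically consumes the reservoir quota first and then falls back to
// the fixed rate. This shows only the decision shape; real implementations
// also track ReservoirQuotaTTL and per-second accounting.
func exampleShouldSample(target *SamplingTargetDocument, usedThisSecond int32, random float64) bool {
	if target == nil {
		return false
	}
	if target.ReservoirQuota != nil && usedThisSecond < *target.ReservoirQuota {
		return true
	}
	return random < target.FixedRate
}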
// A segment from a trace that has been ingested by the X-Ray service. The segment
// can be compiled from documents uploaded with PutTraceSegments (https://docs.aws.amazon.com/xray/latest/api/API_PutTraceSegments.html)
// , or an inferred segment for a downstream service, generated from a subsegment
// sent by the service that called it. For the full segment document schema, see
// Amazon Web Services X-Ray Segment Documents (https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html)
// in the Amazon Web Services X-Ray Developer Guide.
type Segment struct {
// The segment document.
Document *string
// The segment's ID.
Id *string
noSmithyDocumentSerde
}
// Information about an application that processed requests, users that made
// requests, or downstream services, resources, and applications that an
// application used.
type Service struct {
// Identifier of the Amazon Web Services account in which the service runs.
AccountId *string
// A histogram that maps the spread of service durations.
DurationHistogram []HistogramEntry
// Connections to downstream services.
Edges []Edge
// The end time of the last segment that the service generated.
EndTime *time.Time
// The canonical name of the service.
Name *string
// A list of names for the service, including the canonical name.
Names []string
// Identifier for the service. Unique within the service map.
ReferenceId *int32
// A histogram that maps the spread of service response times.
ResponseTimeHistogram []HistogramEntry
// Indicates that the service was the first service to process a request.
Root *bool
// The start time of the first segment that the service generated.
StartTime *time.Time
// The service's state.
State *string
// Aggregated statistics for the service.
SummaryStatistics *ServiceStatistics
// The type of service.
// - Amazon Web Services Resource - The type of an Amazon Web Services resource.
// For example, AWS::EC2::Instance for an application running on Amazon EC2 or
// AWS::DynamoDB::Table for an Amazon DynamoDB table that the application used.
// - Amazon Web Services Service - The type of an Amazon Web Services service.
// For example, AWS::DynamoDB for downstream calls to Amazon DynamoDB that didn't
// target a specific table.
// - client - Represents the clients that sent requests to a root service.
// - remote - A downstream service of indeterminate type.
Type *string
noSmithyDocumentSerde
}
type ServiceId struct {
//
AccountId *string
//
Name *string
//
Names []string
//
Type *string
noSmithyDocumentSerde
}
// Response statistics for a service.
type ServiceStatistics struct {
// Information about requests that failed with a 4xx Client Error status code.
ErrorStatistics *ErrorStatistics
// Information about requests that failed with a 5xx Server Error status code.
FaultStatistics *FaultStatistics
// The number of requests that completed with a 2xx Success status code.
OkCount *int64
// The total number of completed requests.
TotalCount *int64
// The aggregate response time of completed requests.
TotalResponseTime *float64
noSmithyDocumentSerde
}
// A map that contains tag keys and tag values to attach to an Amazon Web Services
// X-Ray group or sampling rule. For more information about ways to use tags, see
// Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)
// in the Amazon Web Services General Reference. The following restrictions apply
// to tags:
// - Maximum number of user-applied tags per resource: 50
// - Tag keys and values are case sensitive.
// - Don't use aws: as a prefix for keys; it's reserved for Amazon Web Services
// use. You cannot edit or delete system tags.
type Tag struct {
// A tag key, such as Stage or Name . A tag key cannot be empty. The key can be a
// maximum of 128 characters, and can contain only Unicode letters, numbers, or
// separators, or the following special characters: + - = . _ : /
//
// This member is required.
Key *string
// An optional tag value, such as Production or test-only . The value can be a
// maximum of 255 characters, and contain only Unicode letters, numbers, or
// separators, or the following special characters: + - = . _ : /
//
// This member is required.
Value *string
noSmithyDocumentSerde
}
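// Illustrative sketch, not generated code: Key and Value are required
// pointers, so tags are usually built from local variables or a small helper.
// The tag keys and values below are assumptions for the example.
func exampleTags() []Tag {
	str := func(s string) *string { return &s }
	return []Tag{
		{Key: str("Stage"), Value: str("Production")},
		{Key: str("Team"), Value: str("observability")},
	}
}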
type TelemetryRecord struct {
//
//
// This member is required.
Timestamp *time.Time
//
BackendConnectionErrors *BackendConnectionErrors
//
SegmentsReceivedCount *int32
//
SegmentsRejectedCount *int32
//
SegmentsSentCount *int32
//
SegmentsSpilloverCount *int32
noSmithyDocumentSerde
}
// A list of TimeSeriesStatistic structures.
type TimeSeriesServiceStatistics struct {
// Response statistics for an edge.
EdgeSummaryStatistics *EdgeStatistics
// The response time histogram for the selected entities.
ResponseTimeHistogram []HistogramEntry
// The forecasted high and low fault count values.
ServiceForecastStatistics *ForecastStatistics
// Response statistics for a service.
ServiceSummaryStatistics *ServiceStatistics
// Timestamp of the window for which statistics are aggregated.
Timestamp *time.Time
noSmithyDocumentSerde
}
// A collection of segment documents with matching trace IDs.
type Trace struct {
// The length of time in seconds between the start time of the root segment and
// the end time of the last segment that completed.
Duration *float64
// The unique identifier for the request that generated the trace's segments and
// subsegments.
Id *string
// LimitExceeded is set to true when the trace has exceeded the Trace document size
// limit. For more information about this limit and other X-Ray limits and quotas,
// see Amazon Web Services X-Ray endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/xray.html)
// .
LimitExceeded *bool
// Segment documents for the segments and subsegments that comprise the trace.
Segments []Segment
noSmithyDocumentSerde
}
// Metadata generated from the segment documents in a trace.
type TraceSummary struct {
// Annotations from the trace's segment documents.
Annotations map[string][]ValueWithServiceIds
// A list of Availability Zones for any zone corresponding to the trace segments.
AvailabilityZones []AvailabilityZoneDetail
// The length of time in seconds between the start time of the root segment and
// the end time of the last segment that completed.
Duration *float64
// The root of a trace.
EntryPoint *ServiceId
// A collection of ErrorRootCause structures corresponding to the trace segments.
ErrorRootCauses []ErrorRootCause
// A collection of FaultRootCause structures corresponding to the trace segments.
FaultRootCauses []FaultRootCause
// The root segment document has a 400 series error.
HasError *bool
// The root segment document has a 500 series error.
HasFault *bool
// One or more of the segment documents has a 429 throttling error.
HasThrottle *bool
// Information about the HTTP request served by the trace.
Http *Http
// The unique identifier for the request that generated the trace's segments and
// subsegments.
Id *string
// A list of EC2 instance IDs for any instance corresponding to the trace segments.
InstanceIds []InstanceIdDetail
// One or more of the segment documents is in progress.
IsPartial *bool
// The matched time stamp of a defined event.
MatchedEventTime *time.Time
// A list of resource ARNs for any resource corresponding to the trace segments.
ResourceARNs []ResourceARNDetail
// The length of time in seconds between the start and end times of the root
// segment. If the service performs work asynchronously, the response time measures
// the time before the response is sent to the user, while the duration measures
// the amount of time before the last traced activity completes.
ResponseTime *float64
// A collection of ResponseTimeRootCause structures corresponding to the trace
// segments.
ResponseTimeRootCauses []ResponseTimeRootCause
// The revision number of a trace.
Revision int32
// Service IDs from the trace's segment documents.
ServiceIds []ServiceId
// Users from the trace's segment documents.
Users []TraceUser
noSmithyDocumentSerde
}
// Information about a user recorded in segment documents.
type TraceUser struct {
// Services that the user's request hit.
ServiceIds []ServiceId
// The user's name.
UserName *string
noSmithyDocumentSerde
}
// Sampling statistics from a call to GetSamplingTargets (https://docs.aws.amazon.com/xray/latest/api/API_GetSamplingTargets.html)
// that X-Ray could not process.
type UnprocessedStatistics struct {
// The error code.
ErrorCode *string
// The error message.
Message *string
// The name of the sampling rule.
RuleName *string
noSmithyDocumentSerde
}
// Information about a segment that failed processing.
type UnprocessedTraceSegment struct {
// The error that caused processing to fail.
ErrorCode *string
// The segment's ID.
Id *string
// The error message.
Message *string
noSmithyDocumentSerde
}
// Information about a segment annotation.
type ValueWithServiceIds struct {
// Values of the annotation.
AnnotationValue AnnotationValue
// Services to which the annotation applies.
ServiceIds []ServiceId
noSmithyDocumentSerde
}
type noSmithyDocumentSerde = smithydocument.NoSerde
// UnknownUnionMember is returned when a union member is returned over the wire,
// but has an unknown tag.
type UnknownUnionMember struct {
Tag string
Value []byte
noSmithyDocumentSerde
}
func (*UnknownUnionMember) isAnnotationValue() {}
| 1,277 |
aws-sdk-go-v2 | aws | Go | // Code generated by smithy-go-codegen DO NOT EDIT.
package types_test
import (
"fmt"
"github.com/aws/aws-sdk-go-v2/service/xray/types"
)
func ExampleAnnotationValue_outputUsage() {
var union types.AnnotationValue
// type switches can be used to check the union value
switch v := union.(type) {
case *types.AnnotationValueMemberBooleanValue:
_ = v.Value // Value is bool
case *types.AnnotationValueMemberNumberValue:
_ = v.Value // Value is float64
case *types.AnnotationValueMemberStringValue:
_ = v.Value // Value is string
case *types.UnknownUnionMember:
fmt.Println("unknown tag:", v.Tag)
default:
fmt.Println("union is nil or unknown type")
}
}
var _ *string
var _ *bool
var _ *float64
| 35 |
aws-secretsmanager-caching-go | aws | Go | package scintegtests
import (
"bytes"
"math/rand"
"regexp"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
"github.com/aws/aws-secretsmanager-caching-go/secretcache"
)
var (
randStringSet = []rune("abcdefghijklmnopqrstuvwxyz0123456789")
secretNamePrefix = "scIntegTest_"
subTests = []func(t *testing.T, api secretsmanageriface.SecretsManagerAPI) string{
integTest_getSecretBinary,
integTest_getSecretBinaryWithStage,
integTest_getSecretString,
integTest_getSecretStringWithStage,
integTest_getSecretStringWithTTL,
integTest_getSecretStringNoSecret,
}
)
func init() {
rand.Seed(time.Now().Unix())
}
func generateRandString(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = randStringSet[rand.Intn(len(randStringSet))]
}
return string(b)
}
func generateSecretName(testName string) (string, string) {
clientRequestToken := generateRandString(32)
secretName := secretNamePrefix + testName + "_" + clientRequestToken
return secretName, clientRequestToken
}
func createSecret(
testName string, secretString *string, secretBinary []byte, api secretsmanageriface.SecretsManagerAPI,
) (*secretsmanager.CreateSecretOutput, error) {
secretName, requestToken := generateSecretName(testName)
createSecretInput := &secretsmanager.CreateSecretInput{
Name: &secretName,
SecretString: secretString,
SecretBinary: secretBinary,
ClientRequestToken: &requestToken,
}
return api.CreateSecret(createSecretInput)
}
// Lazily delete all the secrets we created
// Also delete secrets created over 2 days ago, with the "scIntegTest_" prefix
func cleanupSecrets(secretNames *[]string, secretsManagerClient *secretsmanager.SecretsManager, t *testing.T) {
// Cleanup secrets created on this test run
performDelete(secretNames, secretsManagerClient, true)
prevRunSecrets := getPrevRunSecrets(secretsManagerClient)
for _, secretARN := range prevRunSecrets {
t.Logf("Scheduling deletion for secret: \"%s\"", secretARN)
}
// Cleanup secrets created on past runs
performDelete(&prevRunSecrets, secretsManagerClient, false)
}
func getPrevRunSecrets(secretsManagerClient *secretsmanager.SecretsManager) []string {
var nextToken *string
var secretNames []string
twoDaysAgo := time.Now().Add(-(48 * time.Hour))
testSecretNamePrefix := "^" + secretNamePrefix + ".+"
for {
resp, err := secretsManagerClient.ListSecrets(
&secretsmanager.ListSecretsInput{NextToken: nextToken},
)
if resp == nil || err != nil {
break
}
for _, secret := range resp.SecretList {
if secret.Name == nil || secret.LastChangedDate == nil || secret.LastAccessedDate == nil {
	continue
}
// MatchString avoids copying into a zero-length byte slice, which would never match.
match, _ := regexp.MatchString(testSecretNamePrefix, *secret.Name)
if match && secret.LastChangedDate.Before(twoDaysAgo) && secret.LastAccessedDate.Before(twoDaysAgo) {
secretNames = append(secretNames, *secret.ARN)
}
}
if resp.NextToken == nil {
break
}
nextToken = resp.NextToken
time.Sleep(1 * time.Second)
}
return secretNames
}
func performDelete(secretNames *[]string, secretsManagerClient *secretsmanager.SecretsManager, forceDelete bool) {
for _, secretName := range *secretNames {
if secretName == "" {
continue
}
time.Sleep(time.Second / 2)
_, _ = secretsManagerClient.DeleteSecret(&secretsmanager.DeleteSecretInput{
SecretId: &secretName,
ForceDeleteWithoutRecovery: &forceDelete,
})
}
}
func TestIntegration(t *testing.T) {
// Create a new API client
// See https://docs.aws.amazon.com/sdk-for-go/api/aws/session/ for how the session loads credentials
sess, err := session.NewSession()
if err != nil {
t.Fatal(err)
}
secretsManagerClient := secretsmanager.New(sess)
// Collect the secret arns created for them
var secretNames []string
// Defer cleanup of secrets to ensure cleanup in case of caller function being terminated
defer cleanupSecrets(&secretNames, secretsManagerClient, t)
// Run integ tests
for _, testFunc := range subTests {
secretNames = append(secretNames, testFunc(t, secretsManagerClient))
}
}
func integTest_getSecretBinary(t *testing.T, api secretsmanageriface.SecretsManagerAPI) string {
cache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = api },
)
secretBinary := []byte{0, 1, 1, 0, 0, 1, 1, 0}
createResult, err := createSecret("getSecretBinary", nil, secretBinary, api)
if err != nil {
t.Errorf("Failed to create secret \"getSecretBinary\" ERROR: %s", err)
return ""
}
resultBinary, err := cache.GetSecretBinary(*createResult.ARN)
if err != nil {
t.Error(err)
return *createResult.ARN
}
if !bytes.Equal(resultBinary, secretBinary) {
t.Error("Expected and result binary not the same")
}
return *createResult.ARN
}
func integTest_getSecretBinaryWithStage(t *testing.T, api secretsmanageriface.SecretsManagerAPI) string {
cache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = api },
)
secretBinary := []byte{0, 1, 1, 0, 0, 1, 1, 0}
createResult, err := createSecret("getSecretBinaryWithStage", nil, secretBinary, api)
if err != nil {
t.Errorf("Failed to create secret \"getSecretBinaryWithStage\" ERROR: %s", err)
return ""
}
updatedSecretBinary := []byte{1, 0, 0, 1, 1, 0, 0, 1}
updatedRequestToken := generateRandString(32)
_, err = api.UpdateSecret(&secretsmanager.UpdateSecretInput{
SecretId: createResult.ARN,
SecretBinary: updatedSecretBinary,
ClientRequestToken: &updatedRequestToken,
})
if err != nil {
t.Errorf("Failed to update secret: \"%s\" ERROR: %s", *createResult.ARN, err)
return *createResult.ARN
}
resultBinary, err := cache.GetSecretBinaryWithStage(*createResult.ARN, "AWSPREVIOUS")
if err != nil {
t.Error(err)
return *createResult.ARN
}
if !bytes.Equal(resultBinary, secretBinary) {
t.Error("Expected and result binary not the same")
}
resultBinary, err = cache.GetSecretBinaryWithStage(*createResult.ARN, "AWSCURRENT")
if err != nil {
t.Error(err)
return *createResult.ARN
}
if !bytes.Equal(resultBinary, updatedSecretBinary) {
t.Error("Expected and result binary not the same")
}
return *createResult.ARN
}
func integTest_getSecretString(t *testing.T, api secretsmanageriface.SecretsManagerAPI) string {
cache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = api },
)
secretString := "This is a secret"
createResult, err := createSecret("getSecretString", &secretString, nil, api)
if err != nil {
t.Errorf("Failed to create secret: \"getSecretString\" ERROR: %s", err)
return ""
}
resultString, err := cache.GetSecretString(*createResult.ARN)
if err != nil {
t.Error(err)
return *createResult.ARN
}
if secretString != resultString {
t.Errorf("Expected and result secret string are different - \"%s\", \"%s\"", secretString, resultString)
}
return *createResult.ARN
}
func integTest_getSecretStringWithStage(t *testing.T, api secretsmanageriface.SecretsManagerAPI) string {
cache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = api },
)
secretString := "This is a secret string"
createResult, err := createSecret("getSecretStringWithStage", &secretString, nil, api)
if err != nil {
t.Errorf("Failed to create secret: \"getSecretStringWithStage\" ERROR: %s", err)
return ""
}
updatedSecretString := "This is v2 secret string"
updatedRequestToken := generateRandString(32)
_, err = api.UpdateSecret(&secretsmanager.UpdateSecretInput{
SecretId: createResult.ARN,
SecretString: &updatedSecretString,
ClientRequestToken: &updatedRequestToken,
})
if err != nil {
t.Errorf("Failed to update secret: \"%s\" ERROR: %s", *createResult.ARN, err)
return *createResult.ARN
}
resultString, err := cache.GetSecretStringWithStage(*createResult.ARN, "AWSPREVIOUS")
if err != nil {
t.Error(err)
return *createResult.ARN
}
if secretString != resultString {
t.Errorf("Expected and result secret string are different - \"%s\", \"%s\"", secretString, resultString)
}
resultString, err = cache.GetSecretStringWithStage(*createResult.ARN, "AWSCURRENT")
if err != nil {
t.Error(err)
return *createResult.ARN
}
if resultString != updatedSecretString {
t.Errorf("Expected and result secret string are different - \"%s\", \"%s\"", updatedSecretString, resultString)
}
return *createResult.ARN
}
func integTest_getSecretStringWithTTL(t *testing.T, api secretsmanageriface.SecretsManagerAPI) string {
ttlNanoSeconds := (time.Second * 2).Nanoseconds()
cache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = api },
func(c *secretcache.Cache) { c.CacheItemTTL = ttlNanoSeconds },
)
secretString := "This is a secret"
createResult, err := createSecret("getSecretStringWithTTL", &secretString, nil, api)
if err != nil {
t.Errorf("Failed to create secret: \"getSecretStringWithTTL\" ERROR: %s", err)
return ""
}
resultString, err := cache.GetSecretString(*createResult.ARN)
if err != nil {
t.Error(err)
return *createResult.ARN
}
if secretString != resultString {
t.Errorf("Expected and result secret string are different - \"%s\", \"%s\"", secretString, resultString)
return *createResult.ARN
}
updatedSecretString := "This is v2 secret string"
updatedRequestToken := generateRandString(32)
_, err = api.UpdateSecret(&secretsmanager.UpdateSecretInput{
SecretId: createResult.ARN,
SecretString: &updatedSecretString,
ClientRequestToken: &updatedRequestToken,
})
if err != nil {
t.Errorf("Failed to update secret: \"%s\" ERROR: %s", *createResult.ARN, err)
return *createResult.ARN
}
resultString, err = cache.GetSecretString(*createResult.ARN)
if err != nil {
t.Error(err)
return *createResult.ARN
}
if secretString != resultString {
t.Errorf("Expected cached secret to be same as previous version - \"%s\", \"%s\"", resultString, secretString)
return *createResult.ARN
}
time.Sleep(time.Nanosecond * time.Duration(ttlNanoSeconds))
resultString, err = cache.GetSecretString(*createResult.ARN)
if err != nil {
t.Error(err)
return *createResult.ARN
}
if updatedSecretString != resultString {
t.Errorf("Expected cached secret to be same as updated version - \"%s\", \"%s\"", resultString, updatedSecretString)
return *createResult.ARN
}
return *createResult.ARN
}
func integTest_getSecretStringNoSecret(t *testing.T, api secretsmanageriface.SecretsManagerAPI) string {
cache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = api },
)
secretName := "NoSuchSecret"
_, err := cache.GetSecretString(secretName)
if err == nil {
t.Errorf("Expected to not find a secret called %s", secretName)
} else if awsErr, ok := err.(awserr.Error); !ok || awsErr.Code() != secretsmanager.ErrCodeResourceNotFoundException {
t.Errorf("Expected %s err but got %s", secretsmanager.ErrCodeResourceNotFoundException, err)
}
return ""
}
| 389 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
// Package secretcache provides the Cache struct for in-memory caching of secrets stored in AWS Secrets Manager.
// It also exports a CacheHook, for pre-store and post-fetch processing of cached values.
package secretcache
import (
"context"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
)
// Cache client for AWS Secrets Manager secrets.
type Cache struct {
lru *lruCache
CacheConfig
Client secretsmanageriface.SecretsManagerAPI
}
// New constructs a secret cache using functional options, uses defaults otherwise.
// Initialises a SecretsManager Client from a new session.Session.
// Initialises CacheConfig to default values.
// Initialises lru cache with a default max size.
func New(optFns ...func(*Cache)) (*Cache, error) {
cache := &Cache{
//Initialise default configuration
CacheConfig: CacheConfig{
MaxCacheSize: DefaultMaxCacheSize,
VersionStage: DefaultVersionStage,
CacheItemTTL: DefaultCacheItemTTL,
},
}
// Iterate over options allowing user to specify alternate
// configurations.
for _, optFn := range optFns {
optFn(cache)
}
//Initialise lru cache
cache.lru = newLRUCache(cache.MaxCacheSize)
//Initialise the secrets manager client
if cache.Client == nil {
sess, err := session.NewSession()
if err != nil {
return nil, err
}
cache.Client = secretsmanager.New(sess)
}
return cache, nil
}
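// exampleNewWithOptions is an illustrative sketch, not part of the library API:
// it shows how the functional options accepted by New can override the defaults
// before the LRU and client are initialised. The option values below are
// arbitrary assumptions chosen for the example.
func exampleNewWithOptions() (*Cache, error) {
	return New(
		func(c *Cache) { c.MaxCacheSize = 2048 },         // larger LRU than DefaultMaxCacheSize
		func(c *Cache) { c.CacheItemTTL = 300000000000 }, // 5 minutes, expressed in nanoseconds
		func(c *Cache) { c.VersionStage = DefaultVersionStage },
	)
}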
// getCachedSecret gets a cached secret for the given secret identifier.
// Returns cached secret item.
func (c *Cache) getCachedSecret(secretId string) *secretCacheItem {
lruValue, found := c.lru.get(secretId)
if !found {
cacheItem := newSecretCacheItem(c.CacheConfig, c.Client, secretId)
c.lru.putIfAbsent(secretId, &cacheItem)
lruValue, _ = c.lru.get(secretId)
}
secretCacheItem, _ := lruValue.(*secretCacheItem)
return secretCacheItem
}
// GetSecretString gets the secret string value from the cache for given secret id and a default version stage.
// Returns the secret string and an error if operation failed.
func (c *Cache) GetSecretString(secretId string) (string, error) {
return c.GetSecretStringWithContext(aws.BackgroundContext(), secretId)
}
// GetSecretStringWithContext gets the secret string value from the cache for given secret id and a default version stage, using the provided context.
func (c *Cache) GetSecretStringWithContext(ctx context.Context, secretId string) (string, error) {
return c.GetSecretStringWithStageWithContext(ctx, secretId, DefaultVersionStage)
}
// GetSecretStringWithStage gets the secret string value from the cache for given secret id and version stage.
// Returns the secret string and an error if operation failed.
func (c *Cache) GetSecretStringWithStage(secretId string, versionStage string) (string, error) {
return c.GetSecretStringWithStageWithContext(aws.BackgroundContext(), secretId, versionStage)
}
// GetSecretStringWithStageWithContext gets the secret string value from the cache for given secret id and version stage, using the provided context.
func (c *Cache) GetSecretStringWithStageWithContext(ctx context.Context, secretId string, versionStage string) (string, error) {
secretCacheItem := c.getCachedSecret(secretId)
getSecretValueOutput, err := secretCacheItem.getSecretValue(ctx, versionStage)
if err != nil {
return "", err
}
if getSecretValueOutput.SecretString == nil {
return "", &InvalidOperationError{
baseError{
Message: "requested secret version does not contain SecretString",
},
}
}
return *getSecretValueOutput.SecretString, nil
}
// GetSecretBinary gets the secret binary value from the cache for given secret id and a default version stage.
// Returns the secret binary and an error if operation failed.
func (c *Cache) GetSecretBinary(secretId string) ([]byte, error) {
return c.GetSecretBinaryWithContext(aws.BackgroundContext(), secretId)
}
// GetSecretBinaryWithContext gets the secret binary value from the cache for given secret id and a default version stage, using the provided context.
func (c *Cache) GetSecretBinaryWithContext(ctx context.Context, secretId string) ([]byte, error) {
return c.GetSecretBinaryWithStageWithContext(ctx, secretId, DefaultVersionStage)
}
// GetSecretBinaryWithStage gets the secret binary value from the cache for given secret id and version stage.
// Returns the secret binary and an error if operation failed.
func (c *Cache) GetSecretBinaryWithStage(secretId string, versionStage string) ([]byte, error) {
return c.GetSecretBinaryWithStageWithContext(aws.BackgroundContext(), secretId, versionStage)
}
// GetSecretBinaryWithStageWithContext gets the secret binary value from the cache for given secret id and version stage, using the provided context.
func (c *Cache) GetSecretBinaryWithStageWithContext(ctx context.Context, secretId string, versionStage string) ([]byte, error) {
secretCacheItem := c.getCachedSecret(secretId)
getSecretValueOutput, err := secretCacheItem.getSecretValue(ctx, versionStage)
if err != nil {
return nil, err
}
if getSecretValueOutput.SecretBinary == nil {
return nil, &InvalidOperationError{
baseError{
Message: "requested secret version does not contain SecretBinary",
},
}
}
return getSecretValueOutput.SecretBinary, nil
}
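// Illustrative sketch, not part of the library: the typical read paths through
// the public API above, written as if from a consumer package. The secret
// identifiers are assumptions chosen for the example.
//
//	import (
//		"log"
//
//		"github.com/aws/aws-secretsmanager-caching-go/secretcache"
//	)
//
//	func readSecrets() {
//		cache, err := secretcache.New()
//		if err != nil {
//			log.Fatal(err)
//		}
//		// String secrets default to the AWSCURRENT version stage...
//		password, err := cache.GetSecretString("myapp/db-password")
//		if err != nil {
//			log.Fatal(err)
//		}
//		_ = password
//		// ...while binary secrets and explicit stages use the dedicated accessors.
//		previousKey, err := cache.GetSecretBinaryWithStage("myapp/signing-key", "AWSPREVIOUS")
//		if err != nil {
//			log.Fatal(err)
//		}
//		_ = previousKey
//	}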
| 157 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache
const (
DefaultMaxCacheSize = 1024
DefaultCacheItemTTL = 3600000000000 // 1 hour in nanoseconds
DefaultVersionStage = "AWSCURRENT"
)
// CacheConfig is the config object passed to the Cache struct
type CacheConfig struct {
//The maximum number of cached secrets to maintain before evicting secrets that
//have not been accessed recently.
MaxCacheSize int
//The number of nanoseconds that a cached item is considered valid before
// requiring a refresh of the secret state. Items that have exceeded this
// TTL will be refreshed synchronously when requesting the secret value. If
// the synchronous refresh fails, the stale secret will be returned.
CacheItemTTL int64
//The version stage that will be used when requesting the secret values for
//this cache.
VersionStage string
//Used to hook in-memory cache updates.
Hook CacheHook
}
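// Illustrative sketch, not part of the library: a CacheConfig populated with
// non-default values. The numbers are arbitrary assumptions for the example;
// note that CacheItemTTL is expressed in nanoseconds.
var exampleCacheConfig = CacheConfig{
	MaxCacheSize: 500,
	CacheItemTTL: 900000000000, // 15 minutes in nanoseconds
	VersionStage: DefaultVersionStage,
	// Hook is left nil, so values are cached exactly as returned by the API.
}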
| 41 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache
// CacheHook is an interface to hook into the local in-memory cache. This interface will allow
// users to perform actions on the items being stored in the in-memory
// cache. One example would be encrypting/decrypting items stored in the
// in-memory cache.
type CacheHook interface {
// Put prepares the object for storing in the cache.
Put(data interface{}) interface{}
// Get derives the object from the cached object.
Get(data interface{}) interface{}
}
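// noopHook is an illustrative sketch, not part of the library: a minimal
// CacheHook implementation. A real hook might encrypt the value in Put and
// decrypt it in Get; this pass-through version only marks where that logic
// would live.
type noopHook struct{}

// Put would transform the value before it is stored in the in-memory cache.
func (noopHook) Put(data interface{}) interface{} { return data }

// Get would reverse the Put transformation when the value is read back.
func (noopHook) Get(data interface{}) interface{} { return data }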
| 27 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache_test
import (
"bytes"
"github.com/aws/aws-secretsmanager-caching-go/secretcache"
"testing"
)
type DummyCacheHook struct {
putCount int
getCount int
}
func (hook *DummyCacheHook) Put(data interface{}) interface{} {
hook.putCount++
return data
}
func (hook *DummyCacheHook) Get(data interface{}) interface{} {
hook.getCount++
return data
}
func TestCacheHookString(t *testing.T) {
mockClient, secretId, secretString := newMockedClientWithDummyResults()
hook := &DummyCacheHook{}
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
func(c *secretcache.Cache) { c.CacheConfig.Hook = hook },
)
result, err := secretCache.GetSecretString(secretId)
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if result != secretString {
t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", secretString, result)
}
if hook.getCount != 2 {
t.Fatalf("Expected DummyCacheHook's get method to be called twice - once each for cacheItem and cacheVersion")
}
if hook.putCount != 2 {
t.Fatalf("Expected DummyCacheHook's put method to be called twice - once each for cacheItem and cacheVersion")
}
}
func TestCacheHookBinary(t *testing.T) {
mockClient, secretId, _ := newMockedClientWithDummyResults()
secretBinary := []byte{0, 0, 0, 0, 1, 1, 1, 1}
mockClient.MockedGetResult.SecretString = nil
mockClient.MockedGetResult.SecretBinary = secretBinary
hook := &DummyCacheHook{}
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
func(c *secretcache.Cache) { c.CacheConfig.Hook = hook },
)
result, err := secretCache.GetSecretBinary(secretId)
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if !bytes.Equal(result, secretBinary) {
t.Fatalf("Expected and result secret binary are different")
}
if hook.getCount != 2 {
t.Fatalf("Expected DummyCacheHook's get method to be called twice - once each for cacheItem and cacheVersion")
}
if hook.putCount != 2 {
t.Fatalf("Expected DummyCacheHook's put method to be called twice - once each for cacheItem and cacheVersion")
}
}
| 95 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache
import (
"context"
"fmt"
"math"
"math/rand"
"time"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
)
// secretCacheItem maintains a cache of secret versions.
type secretCacheItem struct {
versions *lruCache
// The next scheduled refresh time for this item. Once the item is accessed
// after this time, the item will be synchronously refreshed.
nextRefreshTime int64
*cacheObject
}
// newSecretCacheItem initialises a secretCacheItem with a small fixed-size LRU (10 entries) for its versions and sets the next refresh time to now
func newSecretCacheItem(config CacheConfig, client secretsmanageriface.SecretsManagerAPI, secretId string) secretCacheItem {
return secretCacheItem{
versions: newLRUCache(10),
cacheObject: &cacheObject{config: config, client: client, secretId: secretId, refreshNeeded: true},
nextRefreshTime: time.Now().UnixNano(),
}
}
// isRefreshNeeded determines if the cached item should be refreshed.
func (ci *secretCacheItem) isRefreshNeeded() bool {
if ci.cacheObject.isRefreshNeeded() {
return true
}
return ci.nextRefreshTime <= time.Now().UnixNano()
}
// getVersionId gets the version id for the given version stage.
// Returns the version id and a boolean to indicate success.
func (ci *secretCacheItem) getVersionId(versionStage string) (string, bool) {
result := ci.getWithHook()
if result == nil {
return "", false
}
if result.VersionIdsToStages == nil {
return "", false
}
for versionId, stages := range result.VersionIdsToStages {
for _, stage := range stages {
if versionStage == *stage {
return versionId, true
}
}
}
return "", false
}
// executeRefresh performs the actual refresh of the cached secret information.
// Returns the DescribeSecret API result and an error if call failed.
func (ci *secretCacheItem) executeRefresh(ctx context.Context) (*secretsmanager.DescribeSecretOutput, error) {
input := &secretsmanager.DescribeSecretInput{
SecretId: &ci.secretId,
}
result, err := ci.client.DescribeSecretWithContext(ctx, input, request.WithAppendUserAgent(userAgent()))
var maxTTL int64
if ci.config.CacheItemTTL == 0 {
maxTTL = DefaultCacheItemTTL
} else {
maxTTL = ci.config.CacheItemTTL
}
var ttl int64
if maxTTL < 0 {
return nil, &InvalidConfigError{
baseError{
Message: "cannot set negative ttl on cache",
},
}
} else if maxTTL < 2 {
ttl = maxTTL
} else {
ttl = rand.Int63n(maxTTL/2) + maxTTL/2
}
ci.nextRefreshTime = time.Now().Add(time.Nanosecond * time.Duration(ttl)).UnixNano()
return result, err
}
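// Worked example of the jitter above: with the default configuration,
// maxTTL = DefaultCacheItemTTL (1 hour), so ttl is drawn uniformly from
// [maxTTL/2, maxTTL), i.e. between 30 and 60 minutes. A fleet of caches
// holding the same secret therefore spreads its DescribeSecret refreshes out
// instead of refreshing in lockstep exactly one TTL after start-up.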
// getVersion gets the secret cache version associated with the given stage.
// Returns a boolean to indicate operation success.
func (ci *secretCacheItem) getVersion(versionStage string) (*cacheVersion, bool) {
versionId, versionIdFound := ci.getVersionId(versionStage)
if !versionIdFound {
return nil, false
}
cachedValue, cachedValueFound := ci.versions.get(versionId)
if !cachedValueFound {
cacheVersion := newCacheVersion(ci.config, ci.client, ci.secretId, versionId)
ci.versions.putIfAbsent(versionId, &cacheVersion)
cachedValue, _ = ci.versions.get(versionId)
}
secretCacheVersion, _ := cachedValue.(*cacheVersion)
return secretCacheVersion, true
}
// refresh the cached object when needed.
func (ci *secretCacheItem) refresh(ctx context.Context) {
if !ci.isRefreshNeeded() {
return
}
ci.refreshNeeded = false
result, err := ci.executeRefresh(ctx)
if err != nil {
ci.errorCount++
ci.err = err
delay := exceptionRetryDelayBase * math.Pow(exceptionRetryGrowthFactor, float64(ci.errorCount))
delay = math.Min(delay, exceptionRetryDelayMax)
delayDuration := time.Millisecond * time.Duration(delay)
ci.nextRetryTime = time.Now().Add(delayDuration).UnixNano()
return
}
ci.setWithHook(result)
ci.err = nil
ci.errorCount = 0
}
// getSecretValue gets the cached secret value for the given version stage.
// Returns the GetSecretValue API result and an error if operation fails.
func (ci *secretCacheItem) getSecretValue(ctx context.Context, versionStage string) (*secretsmanager.GetSecretValueOutput, error) {
if versionStage == "" && ci.config.VersionStage == "" {
versionStage = DefaultVersionStage
} else if versionStage == "" && ci.config.VersionStage != "" {
versionStage = ci.config.VersionStage
}
ci.mux.Lock()
defer ci.mux.Unlock()
ci.refresh(ctx)
version, ok := ci.getVersion(versionStage)
if !ok {
if ci.err != nil {
return nil, ci.err
} else {
return nil, &VersionNotFoundError{
baseError{
Message: fmt.Sprintf("could not find secret version for versionStage %s", versionStage),
},
}
}
}
return version.getSecretValue(ctx)
}
// setWithHook sets the cache item's data using the CacheHook, if one is configured.
func (ci *secretCacheItem) setWithHook(result *secretsmanager.DescribeSecretOutput) {
if ci.config.Hook != nil {
ci.data = ci.config.Hook.Put(result)
} else {
ci.data = result
}
}
// getWithHook gets the cache item's data using the CacheHook, if one is configured.
func (ci *secretCacheItem) getWithHook() *secretsmanager.DescribeSecretOutput {
var result interface{}
if ci.config.Hook != nil {
result = ci.config.Hook.Get(ci.data)
} else {
result = ci.data
}
if result == nil {
return nil
} else {
return result.(*secretsmanager.DescribeSecretOutput)
}
}
| 211 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache
import (
"sync"
"time"
"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
)
const (
exceptionRetryDelayBase = 1
exceptionRetryGrowthFactor = 2
exceptionRetryDelayMax = 3600
)
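// Worked example of the retry backoff driven by these constants: after n
// consecutive refresh failures the delay is
// exceptionRetryDelayBase * exceptionRetryGrowthFactor^n, capped at
// exceptionRetryDelayMax, i.e. 2, 4, 8, ... up to 3600. secretCacheItem.refresh
// interprets that value as milliseconds, so retries of a failing request back
// off to at most 3.6 seconds apart.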
// Base cache object for common properties.
type cacheObject struct {
mux sync.Mutex
config CacheConfig
client secretsmanageriface.SecretsManagerAPI
secretId string
err error
errorCount int
refreshNeeded bool
// The UnixNano timestamp of the earliest time a failed AWS Secrets Manager request will be retried.
nextRetryTime int64
data interface{}
}
// isRefreshNeeded determines if the cached object should be refreshed.
func (o *cacheObject) isRefreshNeeded() bool {
if o.refreshNeeded {
return true
}
if o.err == nil {
return false
}
if o.nextRetryTime == 0 {
return true
}
return o.nextRetryTime <= time.Now().UnixNano()
}
| 60 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache
import (
"errors"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
)
func TestIsRefreshNeededBase(t *testing.T) {
obj := cacheObject{refreshNeeded: true}
if !obj.isRefreshNeeded() {
t.Fatalf("Expected true when refreshNeeded is true")
}
obj.refreshNeeded = false
if obj.isRefreshNeeded() {
t.Fatalf("Expected false when err is nil")
}
obj.err = errors.New("some dummy error")
if !obj.isRefreshNeeded() {
t.Fatalf("Expected true when err is not nil")
}
obj.nextRetryTime = time.Now().Add(time.Hour * 1).UnixNano()
if obj.isRefreshNeeded() {
t.Fatalf("Expected false when nextRetryTime is in future")
}
obj.nextRetryTime = time.Now().Add(-(time.Hour * 1)).UnixNano()
if !obj.isRefreshNeeded() {
t.Fatalf("Expected true when nextRetryTime is in past")
}
}
func TestMaxCacheTTL(t *testing.T) {
mockClient := dummyClient{}
cacheItem := secretCacheItem{
cacheObject: &cacheObject{
secretId: "dummy-secret-name",
client: &mockClient,
data: &secretsmanager.DescribeSecretOutput{
ARN: getStrPtr("dummy-arn"),
Name: getStrPtr("dummy-name"),
Description: getStrPtr("dummy-description"),
},
},
}
config := CacheConfig{CacheItemTTL: -1}
cacheItem.config = config
_, err := cacheItem.executeRefresh(aws.BackgroundContext())
if err == nil {
t.Fatalf("Expected error due to negative cache ttl")
}
config = CacheConfig{CacheItemTTL: 0}
cacheItem.config = config
_, err = cacheItem.executeRefresh(aws.BackgroundContext())
if err != nil {
t.Fatalf("Unexpected error on zero cache ttl")
}
}
type dummyClient struct {
secretsmanageriface.SecretsManagerAPI
}
func (d *dummyClient) DescribeSecretWithContext(context aws.Context, input *secretsmanager.DescribeSecretInput, opts ...request.Option) (*secretsmanager.DescribeSecretOutput, error) {
return &secretsmanager.DescribeSecretOutput{}, nil
}
// Helper function to get a string pointer for input string.
func getStrPtr(str string) *string {
return &str
}
| 105 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache
import (
"context"
"math"
"time"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
)
// cacheVersion is the cache object for a secret version.
type cacheVersion struct {
versionId string
*cacheObject
}
// newCacheVersion initialises a cacheVersion to cache a secret version.
func newCacheVersion(config CacheConfig, client secretsmanageriface.SecretsManagerAPI, secretId string, versionId string) cacheVersion {
return cacheVersion{
versionId: versionId,
cacheObject: &cacheObject{config: config, client: client, secretId: secretId, refreshNeeded: true},
}
}
// isRefreshNeeded determines if the cached item should be refreshed.
func (cv *cacheVersion) isRefreshNeeded() bool {
return cv.cacheObject.isRefreshNeeded()
}
// refresh the cached object when needed.
func (cv *cacheVersion) refresh(ctx context.Context) {
if !cv.isRefreshNeeded() {
return
}
cv.refreshNeeded = false
result, err := cv.executeRefresh(ctx)
if err != nil {
cv.errorCount++
cv.err = err
delay := exceptionRetryDelayBase * math.Pow(exceptionRetryGrowthFactor, float64(cv.errorCount))
delay = math.Min(delay, exceptionRetryDelayMax)
delayDuration := time.Millisecond * time.Duration(delay) // interpret the backoff in milliseconds, matching secretCacheItem.refresh
cv.nextRetryTime = time.Now().Add(delayDuration).UnixNano()
return
}
cv.setWithHook(result)
cv.err = nil
cv.errorCount = 0
}
// executeRefresh performs the actual refresh of the cached secret information.
// Returns the GetSecretValue API result and an error if operation fails.
func (cv *cacheVersion) executeRefresh(ctx context.Context) (*secretsmanager.GetSecretValueOutput, error) {
input := &secretsmanager.GetSecretValueInput{
SecretId: &cv.secretId,
VersionId: &cv.versionId,
}
return cv.client.GetSecretValueWithContext(ctx, input, request.WithAppendUserAgent(userAgent()))
}
// getSecretValue gets the cached secret version value.
// Returns the GetSecretValue API cached result and an error if operation fails.
func (cv *cacheVersion) getSecretValue(ctx context.Context) (*secretsmanager.GetSecretValueOutput, error) {
cv.mux.Lock()
defer cv.mux.Unlock()
cv.refresh(ctx)
return cv.getWithHook(), cv.err
}
// setWithHook sets the cache item's data using the CacheHook, if one is configured.
func (cv *cacheVersion) setWithHook(result *secretsmanager.GetSecretValueOutput) {
if cv.config.Hook != nil {
cv.data = cv.config.Hook.Put(result)
} else {
cv.data = result
}
}
// getWithHook gets the cache item's data using the CacheHook, if one is configured.
func (cv *cacheVersion) getWithHook() *secretsmanager.GetSecretValueOutput {
var result interface{}
if cv.config.Hook != nil {
result = cv.config.Hook.Get(cv.data)
} else {
result = cv.data
}
if result == nil {
return nil
} else {
return result.(*secretsmanager.GetSecretValueOutput)
}
}
| 116 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache_test
import (
"bytes"
"errors"
"github.com/aws/aws-secretsmanager-caching-go/secretcache"
"testing"
"github.com/aws/aws-sdk-go/service/secretsmanager"
)
func TestInstantiatesClient(t *testing.T) {
secretCache, err := secretcache.New()
if err != nil || secretCache.Client == nil {
t.Fatalf("Failed to instantiate default Client")
}
}
func TestGetSecretString(t *testing.T) {
mockClient, _, secretString := newMockedClientWithDummyResults()
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
result, err := secretCache.GetSecretString("test")
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if result != secretString {
t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", secretString, result)
}
}
func TestGetSecretBinary(t *testing.T) {
mockClient, _, _ := newMockedClientWithDummyResults()
secretBinary := []byte{0, 1, 1, 0, 0, 1, 1, 0}
mockClient.MockedGetResult.SecretString = nil
mockClient.MockedGetResult.SecretBinary = secretBinary
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
result, err := secretCache.GetSecretBinary("test")
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if !bytes.Equal(result, secretBinary) {
t.Fatalf("Expected and result secret binary are different ")
}
}
func TestGetSecretMissing(t *testing.T) {
versionIdsToStages := make(map[string][]*string)
versionIdsToStages["01234567890123456789012345678901"] = []*string{getStrPtr("AWSCURRENT")}
mockClient := mockSecretsManagerClient{
MockedGetResult: &secretsmanager.GetSecretValueOutput{Name: getStrPtr("test")},
MockedDescribeResult: &secretsmanager.DescribeSecretOutput{VersionIdsToStages: versionIdsToStages},
}
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
_, err := secretCache.GetSecretString("test")
if err == nil {
t.Fatalf("Expected to not find a SecretString in this version")
}
_, err = secretCache.GetSecretBinary("test")
if err == nil {
t.Fatalf("Expected to not find a SecretString in this version")
}
}
func TestGetSecretNoCurrent(t *testing.T) {
versionIdsToStages := make(map[string][]*string)
versionIdsToStages["01234567890123456789012345678901"] = []*string{getStrPtr("NOT_CURRENT")}
mockClient := mockSecretsManagerClient{
MockedGetResult: &secretsmanager.GetSecretValueOutput{
Name: getStrPtr("test"),
SecretString: getStrPtr("some secret string"),
VersionId: getStrPtr("01234567890123456789012345678901"),
},
MockedDescribeResult: &secretsmanager.DescribeSecretOutput{VersionIdsToStages: versionIdsToStages},
}
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
_, err := secretCache.GetSecretString("test")
if err == nil {
t.Fatalf("Expected to not find secret version")
}
mockClient.MockedGetResult.SecretString = nil
mockClient.MockedGetResult.SecretBinary = []byte{0, 1, 0, 1, 0, 1, 0, 1}
_, err = secretCache.GetSecretBinary("test")
if err == nil {
t.Fatalf("Expected to not find secret version")
}
}
func TestGetSecretVersionNotFound(t *testing.T) {
mockClient, secretId, _ := newMockedClientWithDummyResults()
mockClient.MockedGetResult = nil
mockClient.GetSecretValueErr = errors.New("resourceNotFound")
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
_, err := secretCache.GetSecretString(secretId)
if err == nil {
t.Fatalf("Expected to not find secret version")
}
_, err = secretCache.GetSecretBinary(secretId)
if err == nil {
t.Fatalf("Expected to not find secret version")
}
}
func TestGetSecretNoVersions(t *testing.T) {
mockClient, secretId, _ := newMockedClientWithDummyResults()
mockClient.MockedGetResult = nil
mockClient.MockedDescribeResult.VersionIdsToStages = nil
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
_, err := secretCache.GetSecretString(secretId)
if err == nil {
t.Fatalf("Expected to not find secret version")
}
_, err = secretCache.GetSecretBinary(secretId)
if err == nil {
t.Fatalf("Expected to not find secret version")
}
}
func TestGetSecretStringMultipleTimes(t *testing.T) {
mockClient, secretId, secretString := newMockedClientWithDummyResults()
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
for i := 0; i < 100; i++ {
result, err := secretCache.GetSecretString(secretId)
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if result != secretString {
t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", secretString, result)
}
}
if mockClient.DescribeSecretCallCount != 1 {
t.Fatalf("Expected DescribeSecret to be called once, was called - \"%d\" times", mockClient.DescribeSecretCallCount)
}
if mockClient.GetSecretValueCallCount != 1 {
t.Fatalf("Expected GetSecretValue to be called once, was called - \"%d\" times", mockClient.GetSecretValueCallCount)
}
}
func TestGetSecretBinaryMultipleTimes(t *testing.T) {
mockClient, secretId, _ := newMockedClientWithDummyResults()
secretBinary := []byte{0, 1, 0, 1, 1, 1, 0, 0}
mockClient.MockedGetResult.SecretBinary = secretBinary
mockClient.MockedGetResult.SecretString = nil
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
for i := 0; i < 100; i++ {
result, err := secretCache.GetSecretBinary(secretId)
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if !bytes.Equal(result, secretBinary) {
t.Fatalf("Expected and result binary are different")
}
}
if mockClient.DescribeSecretCallCount != 1 {
t.Fatalf("Expected DescribeSecret to be called once, was called - \"%d\" times", mockClient.DescribeSecretCallCount)
}
if mockClient.GetSecretValueCallCount != 1 {
t.Fatalf("Expected GetSecretValue to be called once, was called - \"%d\" times", mockClient.GetSecretValueCallCount)
}
}
func TestGetSecretStringRefresh(t *testing.T) {
mockClient, secretId, secretString := newMockedClientWithDummyResults()
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
func(c *secretcache.Cache) { c.CacheConfig.CacheItemTTL = 1 },
)
for i := 0; i < 10; i++ {
result, err := secretCache.GetSecretString(secretId)
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if result != secretString {
t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", secretString, result)
}
}
}
func TestGetSecretBinaryRefresh(t *testing.T) {
mockClient, secretId, _ := newMockedClientWithDummyResults()
secretBinary := []byte{0, 1, 1, 1, 1, 1, 0, 0}
mockClient.MockedGetResult.SecretString = nil
mockClient.MockedGetResult.SecretBinary = secretBinary
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
func(c *secretcache.Cache) { c.CacheConfig.CacheItemTTL = 1 },
)
for i := 0; i < 10; i++ {
result, err := secretCache.GetSecretBinary(secretId)
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if !bytes.Equal(result, secretBinary) {
t.Fatalf("Expected and result secret binary are different")
}
}
}
func TestGetSecretStringWithStage(t *testing.T) {
mockClient, secretId, secretString := newMockedClientWithDummyResults()
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
for i := 0; i < 10; i++ {
result, err := secretCache.GetSecretStringWithStage(secretId, "versionStage-42")
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if result != secretString {
t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", secretString, result)
}
}
}
func TestGetSecretBinaryWithStage(t *testing.T) {
mockClient, secretId, _ := newMockedClientWithDummyResults()
secretBinary := []byte{0, 1, 1, 0, 0, 1, 0, 1}
mockClient.MockedGetResult.SecretString = nil
mockClient.MockedGetResult.SecretBinary = secretBinary
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
for i := 0; i < 10; i++ {
result, err := secretCache.GetSecretBinaryWithStage(secretId, "versionStage-42")
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if !bytes.Equal(result, secretBinary) {
t.Fatalf("Expected and result secret binary are different")
}
}
}
func TestGetSecretStringMultipleNotFound(t *testing.T) {
mockClient := mockSecretsManagerClient{
GetSecretValueErr: errors.New("versionNotFound"),
DescribeSecretErr: errors.New("secretNotFound"),
}
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
for i := 0; i < 100; i++ {
_, err := secretCache.GetSecretStringWithStage("test", "versionStage-42")
if err == nil {
t.Fatalf("Expected error: secretNotFound for a missing secret")
}
}
if mockClient.DescribeSecretCallCount != 1 {
t.Fatalf("Expected a single call to DescribeSecret API, got %d", mockClient.DescribeSecretCallCount)
}
}
func TestGetSecretBinaryMultipleNotFound(t *testing.T) {
mockClient := mockSecretsManagerClient{
GetSecretValueErr: errors.New("versionNotFound"),
DescribeSecretErr: errors.New("secretNotFound"),
}
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
for i := 0; i < 100; i++ {
_, err := secretCache.GetSecretBinaryWithStage("test", "versionStage-42")
if err == nil {
t.Fatalf("Expected error: secretNotFound for a missing secret")
}
}
if mockClient.DescribeSecretCallCount != 1 {
t.Fatalf("Expected a single call to DescribeSecret API, got %d", mockClient.DescribeSecretCallCount)
}
}
func TestGetSecretVersionStageEmpty(t *testing.T) {
mockClient, _, secretString := newMockedClientWithDummyResults()
secretCache, _ := secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
)
result, err := secretCache.GetSecretStringWithStage("test", "")
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if result != secretString {
t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", secretString, result)
}
//New cache for new config
secretCache, _ = secretcache.New(
func(c *secretcache.Cache) { c.Client = &mockClient },
func(c *secretcache.Cache) { c.CacheConfig.VersionStage = "" },
)
result, err = secretCache.GetSecretStringWithStage("test", "")
if err != nil {
t.Fatalf("Unexpected error - %s", err.Error())
}
if result != secretString {
t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", secretString, result)
}
}
| 392 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache
type baseError struct {
Message string
}
type VersionNotFoundError struct {
baseError
}
func (v *VersionNotFoundError) Error() string {
return v.Message
}
type InvalidConfigError struct {
baseError
}
func (i *InvalidConfigError) Error() string {
return i.Message
}
type InvalidOperationError struct {
baseError
}
func (i *InvalidOperationError) Error() string {
return i.Message
}
| 43 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache
import (
"sync"
)
// lruCache is a cache implementation using a map and doubly linked list.
type lruCache struct {
cacheMap map[string]*lruItem
cacheMaxSize int
cacheSize int
mux sync.Mutex
head *lruItem
tail *lruItem
}
// lruItem is the cache item to hold data and linked list pointers.
type lruItem struct {
next *lruItem
prev *lruItem
key string
data interface{}
}
// newLRUCache initialises an lruCache instance with given max size.
func newLRUCache(maxSize int) *lruCache {
return &lruCache{
cacheMap: make(map[string]*lruItem),
cacheMaxSize: maxSize,
}
}
// get gets the cached item's data for the given key.
// Updates the fetched item to be head of the linked list.
func (l *lruCache) get(key string) (interface{}, bool) {
l.mux.Lock()
defer l.mux.Unlock()
item, found := l.cacheMap[key]
if !found {
return nil, false
}
l.updateHead(item)
return item.data, true
}
// putIfAbsent puts an lruItem initialised from the given data in the cache.
// Updates head of the linked list to be the new lruItem.
// If cache size is over max allowed size, removes the tail item from cache.
// Returns true if new key is inserted to cache, false if it already existed.
func (l *lruCache) putIfAbsent(key string, data interface{}) bool {
l.mux.Lock()
defer l.mux.Unlock()
_, found := l.cacheMap[key]
if found {
return false
}
item := &lruItem{key: key, data: data}
l.cacheMap[key] = item
l.cacheSize++
l.updateHead(item)
if l.cacheSize > l.cacheMaxSize {
delete(l.cacheMap, (*l.tail).key)
l.unlink(l.tail)
l.cacheSize--
}
return true
}
// updateHead updates head of the linked list to be the input lruItem.
func (l *lruCache) updateHead(item *lruItem) {
if l.head == item {
return
}
l.unlink(item)
item.next = l.head
if l.head != nil {
l.head.prev = item
}
l.head = item
if l.tail == nil {
l.tail = item
}
}
// unlink removes the input lruItem from the linked list.
func (l *lruCache) unlink(item *lruItem) {
if l.head == item {
l.head = item.next
}
if l.tail == item {
l.tail = item.prev
}
if item.prev != nil {
item.prev.next = item.next
}
if item.next != nil {
item.next.prev = item.prev
}
}
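// exampleLRUUsage is an illustrative sketch, not part of the library: it shows
// how the cache above behaves. putIfAbsent only inserts missing keys, get
// refreshes recency, and the least recently used entry is evicted once
// cacheMaxSize is exceeded. The keys and values are arbitrary.
func exampleLRUUsage() {
	lru := newLRUCache(2)
	lru.putIfAbsent("a", 1)
	lru.putIfAbsent("b", 2)
	lru.get("a")            // "a" becomes the most recently used entry
	lru.putIfAbsent("c", 3) // evicts "b", the least recently used entry
	_, stillCached := lru.get("b")
	_ = stillCached // false: "b" was evicted
}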
| 131 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache
import (
"strconv"
"testing"
)
func TestPutIfAbsent(t *testing.T) {
lruCache := newLRUCache(DefaultMaxCacheSize)
key := "some-key"
data := 42
addedToCache := lruCache.putIfAbsent(key, data)
if !addedToCache {
t.Fatalf("Failed initial add to cache")
}
addedToCache = lruCache.putIfAbsent(key, data*2)
if addedToCache {
t.Fatalf("Should have failed second add to cache")
}
retrievedItem, found := lruCache.cacheMap[key]
if !found {
t.Fatalf("Did not find expected entry in cache")
}
if (*retrievedItem).data != data {
t.Fatalf("Expected data %d did not match retrieved data %d", data, (*retrievedItem).data)
}
if lruCache.cacheSize != 1 {
t.Fatalf("Expected cache size to be 1")
}
}
func TestGet(t *testing.T) {
lruCache := newLRUCache(DefaultMaxCacheSize)
key := "some-key"
data := 42
_, found := lruCache.get(key)
if found {
t.Fatalf("Did not expect entry in cache")
}
lruCache.putIfAbsent(key, data)
retrievedData, found := lruCache.get(key)
if !found {
t.Fatalf("Did not find expected entry in cache")
}
if retrievedData != data {
t.Fatalf("Expected data %d did not match retrieved data %d", data, retrievedData)
}
}
func TestLRUCacheMax(t *testing.T) {
lruCache := newLRUCache(10)
for i := 0; i <= 100; i++ {
lruCache.putIfAbsent(strconv.Itoa(i), i)
}
for i := 0; i <= 90; i++ {
if _, found := lruCache.get(strconv.Itoa(i)); found {
t.Fatalf("Found unexpected val in cache - %d", i)
}
}
for i := 91; i <= 100; i++ {
if val, found := lruCache.get(strconv.Itoa(i)); !found || i != val.(int) {
t.Fatalf("Expected to find val in cache - %d", i)
}
}
}
func TestLRUCacheEmpty(t *testing.T) {
lruCache := newLRUCache(10)
_, found := lruCache.get("some-key")
if found {
t.Fatalf("Found unexpected val in cache")
}
}
func TestLRUCacheRecent(t *testing.T) {
lruCache := newLRUCache(10)
for i := 0; i <= 100; i++ {
lruCache.putIfAbsent(strconv.Itoa(i), i)
lruCache.get("0")
}
for i := 1; i <= 91; i++ {
if _, found := lruCache.get(strconv.Itoa(i)); found {
t.Fatalf("Found unexpected val in cache - %d", i)
}
}
for i := 92; i <= 100; i++ {
if val, found := lruCache.get(strconv.Itoa(i)); !found || i != val.(int) {
t.Fatalf("Expected to find val in cache - %d", i)
}
}
if val, found := lruCache.get("0"); !found || val.(int) != 0 {
t.Fatalf("Expected to find val in cache - %d", 0)
}
}
func TestLRUCacheZero(t *testing.T) {
lruCache := newLRUCache(0)
for i := 0; i <= 100; i++ {
strI := strconv.Itoa(i)
lruCache.putIfAbsent(strI, i)
if _, found := lruCache.get(strI); found {
t.Fatalf("Found unexpected val in cache - %d", i)
}
}
for i := 0; i <= 100; i++ {
if _, found := lruCache.get(strconv.Itoa(i)); found {
t.Fatalf("Found unexpected val in cache - %d", i)
}
}
}
func TestLRUCacheOne(t *testing.T) {
lruCache := newLRUCache(1)
for i := 0; i <= 100; i++ {
strI := strconv.Itoa(i)
lruCache.putIfAbsent(strI, i)
if val, found := lruCache.get(strI); !found || i != val.(int) {
t.Fatalf("Expected to find val in cache - %d", i)
}
}
for i := 0; i <= 99; i++ {
if _, found := lruCache.get(strconv.Itoa(i)); found {
t.Fatalf("Found unexpected val in cache - %d", i)
}
}
}
func TestConcurrentAccess(t *testing.T) {
cache := newLRUCache(1)
cache.putIfAbsent("key", "value")
failed := make(chan bool)
go accessor(cache, 500, "key", "value", failed)
go accessor(cache, 400, "key", "value", failed)
go accessor(cache, 300, "key", "value", failed)
go accessor(cache, 600, "key", "value", failed)
for i := 0; i < 4; i++ {
if <-failed {
t.Fatalf("Expected value not found")
}
}
}
func accessor(cache *lruCache, n int, key string, value string, failed chan bool) {
for i := 0; i < n; i++ {
if val, found := cache.get(key); !found || val != value {
failed <- true
}
}
failed <- false
}
func TestConcurrentMutations(t *testing.T) {
cache := newLRUCache(1)
failed := make(chan bool)
go mutator(cache, 500, "key")
go mutator(cache, 400, "key")
go accessor(cache, 300, "key", "value", failed)
go accessor(cache, 600, "key", "value", failed)
}
func mutator(cache *lruCache, n int, key string) {
for i := 0; i < n; i++ {
cache.putIfAbsent(key, i)
}
}
| 209 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache_test
import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
)
// A struct to be used in unit tests as a mock Client
type mockSecretsManagerClient struct {
secretsmanageriface.SecretsManagerAPI
MockedGetResult *secretsmanager.GetSecretValueOutput
MockedDescribeResult *secretsmanager.DescribeSecretOutput
GetSecretValueErr error
DescribeSecretErr error
GetSecretValueCallCount int
DescribeSecretCallCount int
}
// Initialises a mock Client with dummy outputs for GetSecretValue and DescribeSecret APIs
func newMockedClientWithDummyResults() (mockSecretsManagerClient, string, string) {
createDate := time.Now().Add(-time.Hour * 12) // 12 hours ago
versionId := getStrPtr("very-random-uuid")
otherVersionId := getStrPtr("other-random-uuid")
versionStages := []*string{getStrPtr("hello"), getStrPtr("versionStage-42"), getStrPtr("AWSCURRENT")}
otherVersionStages := []*string{getStrPtr("AWSPREVIOUS")}
versionIdsToStages := make(map[string][]*string)
versionIdsToStages[*versionId] = versionStages
versionIdsToStages[*otherVersionId] = otherVersionStages
secretId := getStrPtr("dummy-secret-name")
secretString := getStrPtr("my secret string")
mockedGetResult := secretsmanager.GetSecretValueOutput{
ARN: getStrPtr("dummy-arn"),
CreatedDate: &createDate,
Name: secretId,
SecretString: secretString,
VersionId: versionId,
VersionStages: versionStages,
}
mockedDescribeResult := secretsmanager.DescribeSecretOutput{
ARN: getStrPtr("dummy-arn"),
Name: secretId,
Description: getStrPtr("my dummy description"),
VersionIdsToStages: versionIdsToStages,
}
return mockSecretsManagerClient{
MockedDescribeResult: &mockedDescribeResult,
MockedGetResult: &mockedGetResult,
}, *secretId, *secretString
}
// Overrides the interface method to return dummy result.
func (m *mockSecretsManagerClient) GetSecretValueWithContext(context aws.Context, input *secretsmanager.GetSecretValueInput, opts ...request.Option) (*secretsmanager.GetSecretValueOutput, error) {
m.GetSecretValueCallCount++
if m.GetSecretValueErr != nil {
return nil, m.GetSecretValueErr
}
return m.MockedGetResult, nil
}
// Overrides the interface method to return dummy result.
func (m *mockSecretsManagerClient) DescribeSecretWithContext(context aws.Context, input *secretsmanager.DescribeSecretInput, opts ...request.Option) (*secretsmanager.DescribeSecretOutput, error) {
m.DescribeSecretCallCount++
if m.DescribeSecretErr != nil {
return nil, m.DescribeSecretErr
}
return m.MockedDescribeResult, nil
}
// Helper function to get a string pointer for input string.
func getStrPtr(str string) *string {
return &str
}
| 97 |
aws-secretsmanager-caching-go | aws | Go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache
const (
VersionNumber = "1"
MajorRevisionNumber = "1"
MinorRevisionNumber = "2"
BugfixRevisionNumber = "0"
)
// releaseVersion builds the version string
func releaseVersion() string {
return VersionNumber + "." + MajorRevisionNumber + "." + MinorRevisionNumber + "." + BugfixRevisionNumber
}
// userAgent builds the user agent string to be appended to outgoing requests to the secrets manager API
func userAgent() string {
return "AwsSecretCache/" + releaseVersion()
}
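// With the constants above this yields the user agent suffix
// "AwsSecretCache/1.1.2.0", which the cache appends to outgoing DescribeSecret
// and GetSecretValue requests via request.WithAppendUserAgent.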
| 32 |
aws-sigv4-auth-cassandra-gocql-driver-plugin | aws | Go | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package sigv4 provides SigV4 authentication extensions for connecting to Amazon Keyspaces.
package sigv4
import (
"os"
"time"
"github.com/aws/aws-sigv4-auth-cassandra-gocql-driver-plugin/sigv4/internal"
"github.com/gocql/gocql"
)
// AwsAuthenticator is the authenticator for AWS integration.
// Its fields are exposed publicly to allow easy initialization and, per Go convention, modification after the fact.
type AwsAuthenticator struct {
Region string
AccessKeyId string
SecretAccessKey string
SessionToken string
currentTime time.Time // this is mainly used for testing and not exposed
}
// looks up AWS_DEFAULT_REGION, and falls back to AWS_REGION for Lambda compatibility
func getRegionEnvironment() string {
region := os.Getenv("AWS_DEFAULT_REGION")
if len(region) == 0 {
region = os.Getenv("AWS_REGION")
}
return region
}
// NewAwsAuthenticator initializes the authenticator from the standard AWS CLI environment variables, if they exist.
func NewAwsAuthenticator() AwsAuthenticator {
return AwsAuthenticator{
Region: getRegionEnvironment(),
AccessKeyId: os.Getenv("AWS_ACCESS_KEY_ID"),
SecretAccessKey: os.Getenv("AWS_SECRET_ACCESS_KEY"),
SessionToken: os.Getenv("AWS_SESSION_TOKEN")}
}
func (p AwsAuthenticator) Challenge(req []byte) ([]byte, gocql.Authenticator, error) {
var resp []byte = []byte("SigV4\000\000")
// copy these rather than use a reference due to how gocql creates connections (it's just
// safer if everything is a fresh copy).
auth := signingAuthenticator{region: p.Region,
accessKeyId: p.AccessKeyId,
secretAccessKey: p.SecretAccessKey,
sessionToken: p.SessionToken,
currentTime: p.currentTime}
return resp, auth, nil
}
func (p AwsAuthenticator) Success(data []byte) error {
return nil
}
// this is the internal private authenticator we actually use
type signingAuthenticator struct {
region string
accessKeyId string
secretAccessKey string
sessionToken string
currentTime time.Time
}
func (p signingAuthenticator) Challenge(req []byte) ([]byte, gocql.Authenticator, error) {
nonce, err := internal.ExtractNonce(req)
if err != nil {
return nil, nil, err
}
// init the time if not provided.
var t time.Time = p.currentTime
if t.IsZero() {
t = time.Now().UTC()
}
signedResponse := internal.BuildSignedResponse(p.region, nonce, p.accessKeyId,
p.secretAccessKey, p.sessionToken, t)
// copy this to a separate byte array to prevent slicing corruption with how the framer object works
resp := make([]byte, len(signedResponse))
copy(resp, []byte(signedResponse))
return resp, nil, nil
}
func (p signingAuthenticator) Success(data []byte) error {
return nil
}
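// exampleKeyspacesSession is an illustrative sketch, not part of this package's
// API: it shows how AwsAuthenticator is typically wired into a gocql cluster
// for Amazon Keyspaces. The endpoint, port and certificate path are assumptions
// chosen for the example; Keyspaces requires TLS, so SslOpts must point at a
// trusted certificate bundle in a real deployment.
func exampleKeyspacesSession() (*gocql.Session, error) {
	cluster := gocql.NewCluster("cassandra.us-west-2.amazonaws.com") // assumed regional endpoint
	cluster.Port = 9142                                              // assumed Keyspaces TLS port
	cluster.Consistency = gocql.LocalQuorum
	cluster.Authenticator = NewAwsAuthenticator() // reads the AWS_* environment variables above
	cluster.SslOpts = &gocql.SslOptions{CaPath: "/path/to/trusted-ca.pem"} // assumed path
	return cluster.CreateSession()
}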
| 109 |
aws-sigv4-auth-cassandra-gocql-driver-plugin | aws | Go | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sigv4
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
var stdNonce = []byte("nonce=91703fdc2ef562e19fbdab0f58e42fe5")
// We should switch to sigv4 when initially challenged
func TestShouldReturnSigV4iInitially(t *testing.T) {
target := AwsAuthenticator{}
resp, _, _ := target.Challenge(nil)
assert.Equal(t, "SigV4\000\000", string(resp))
}
func TestShouldTranslate(t *testing.T) {
target := buildStdTarget()
_, challenger, _ := target.Challenge(nil)
resp, _, _ := challenger.Challenge(stdNonce)
expected := "signature=7f3691c18a81b8ce7457699effbfae5b09b4e0714ab38c1292dbdf082c9ddd87,access_key=UserID-1,amzdate=2020-06-09T22:41:51.000Z"
assert.Equal(t, expected, string(resp))
}
func TestAssignFallbackRegionEnvironmentVariable(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
os.Setenv("AWS_REGION", "us-east-2")
defaultRegionTarget := NewAwsAuthenticator()
assert.Equal(t, "us-west-2", defaultRegionTarget.Region)
os.Unsetenv("AWS_DEFAULT_REGION")
regionTarget := NewAwsAuthenticator()
assert.Equal(t, "us-east-2", regionTarget.Region)
os.Unsetenv("AWS_REGION")
}
func buildStdTarget() *AwsAuthenticator {
target := AwsAuthenticator{
Region: "us-west-2",
AccessKeyId: "UserID-1",
SecretAccessKey: "UserSecretKey-1"}
target.currentTime, _ = time.Parse(time.RFC3339, "2020-06-09T22:41:51Z")
return &target
}
| 71 |
aws-sigv4-auth-cassandra-gocql-driver-plugin | aws | Go | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package internal
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"net/url"
"sort"
"strings"
"time"
)
// ExtractNonce extracts the nonce from a request payload,
// as needed for the challenge payload returned by Amazon Keyspaces.
func ExtractNonce(req []byte) (string, error) {
text := string(req)
if !strings.HasPrefix(text, "nonce=") {
return "", errors.New("request does not contain nonce property")
}
nonce := strings.Split(text, "nonce=")[1]
return nonce, nil
}
// toCredDateStamp converts a time to an AWS credential date stamp,
// e.g. 2020-06-09T22:41:51.000Z -> '20200609'.
func toCredDateStamp(t time.Time) string {
return fmt.Sprintf("%d%02d%02d", t.Year(), t.Month(), t.Day())
}
// compute the scope to be used in the request
func computeScope(t time.Time, region string) string {
a := []string{
toCredDateStamp(t),
region,
"cassandra",
"aws4_request"}
return strings.Join(a, "/")
}
func formCanonicalRequest(accessKeyId string, scope string, t time.Time, nonce string) string {
nonceHash := sha256.Sum256([]byte(nonce))
headers := []string{
"X-Amz-Algorithm=AWS4-HMAC-SHA256",
fmt.Sprintf("X-Amz-Credential=%s%%2F%s", accessKeyId, url.QueryEscape(scope)),
fmt.Sprintf("X-Amz-Date=%s", url.QueryEscape(t.Format("2006-01-02T15:04:05.000Z"))),
"X-Amz-Expires=900"}
sort.Strings(headers)
queryString := strings.Join(headers, "&")
return fmt.Sprintf("PUT\n/authenticate\n%s\nhost:cassandra\n\nhost\n%s", queryString, hex.EncodeToString(nonceHash[:]))
}
// applyHmac applies an HMAC-SHA256 keyed with hashSecret to the given string;
// useful because the protocol requires many iterative HMACs.
func applyHmac(data string, hashSecret []byte) []byte {
h := hmac.New(sha256.New, hashSecret)
h.Write([]byte(data))
return h.Sum(nil)
}
func deriveSigningKey(secret string, t time.Time, region string) []byte {
// we successively apply the HMAC in multiple iterations rather than simply
// writing it once (as per the Amazon Keyspaces protocol)
s := "AWS4" + secret
h := applyHmac(toCredDateStamp(t), []byte(s))
h = applyHmac(region, h)
h = applyHmac("cassandra", h)
h = applyHmac("aws4_request", h)
return h
}
func createSignature(canonicalRequest string, t time.Time, signingScope string, signingKey []byte) []byte {
digest := sha256.Sum256([]byte(canonicalRequest))
s := fmt.Sprintf("AWS4-HMAC-SHA256\n%s\n%s\n%s", t.Format("2006-01-02T15:04:05.000Z"), signingScope, hex.EncodeToString(digest[:]))
return applyHmac(s, []byte(signingKey))
}
// BuildSignedResponse creates the response that can be sent for a SigV4 challenge;
// it includes both the signature and the metadata supporting that signature.
func BuildSignedResponse(region string, nonce string, accessKeyId string, secret string, sessionToken string, t time.Time) string {
scope := computeScope(t, region)
canonicalRequest := formCanonicalRequest(accessKeyId, scope, t, nonce)
signingKey := deriveSigningKey(secret, t, region)
signature := createSignature(canonicalRequest, t, scope, signingKey)
result := fmt.Sprintf("signature=%s,access_key=%s,amzdate=%s", hex.EncodeToString(signature), accessKeyId, t.Format("2006-01-02T15:04:05.000Z"))
if sessionToken != "" {
result += fmt.Sprintf(",session_token=%s", sessionToken)
}
return result
}
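// Flow sketch (added for clarity; example values assumed): given the nonce extracted
// from the server challenge, the helpers above combine as follows:
//
//	scope := computeScope(t, "us-west-2")                   // e.g. "20200609/us-west-2/cassandra/aws4_request"
//	canonicalRequest := formCanonicalRequest(keyId, scope, t, nonce)
//	signingKey := deriveSigningKey(secret, t, "us-west-2")  // iterated AWS4-HMAC-SHA256 key chain
//	signature := createSignature(canonicalRequest, t, scope, signingKey)
//
// The returned string has the form "signature=<hex>,access_key=<keyId>,amzdate=<timestamp>",
// with ",session_token=<token>" appended when a session token is supplied.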
| 114 |
aws-sigv4-auth-cassandra-gocql-driver-plugin | aws | Go | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package internal
import (
"encoding/hex"
"github.com/stretchr/testify/assert"
"testing"
"time"
)
const nonce string = "91703fdc2ef562e19fbdab0f58e42fe5"
const region = "us-west-2"
const accessKeyId = "UserID-1"
const secret = "UserSecretKey-1"
// produce arbitrary time 2020-06-09T22:41:51Z
func buildStdInstant() time.Time {
result, _ := time.Parse(time.RFC3339, "2020-06-09T22:41:51Z")
return result
}
// The nonce should be extracted from a well-formed challenge
func TestExtractNonceSuccess(t *testing.T) {
challenge := []byte("nonce=1256")
actualNonce, _ := ExtractNonce(challenge)
assert.Equal(t, actualNonce, "1256")
}
func TestExtractNonceMissing(t *testing.T) {
challenge := []byte("n1256")
_, err := ExtractNonce(challenge)
assert.Error(t, err)
}
func TestComputeScope(t *testing.T) {
scope := computeScope(buildStdInstant(), "us-west-2")
assert.Equal(t, "20200609/us-west-2/cassandra/aws4_request", scope)
}
func TestFormCanonicalRequest(t *testing.T) {
scope := "20200609/us-west-2/cassandra/aws4_request"
canonicalRequest := "PUT\n" +
"/authenticate\n" +
"X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=UserID-1%2F20200609%2Fus-west-2%2Fcassandra%2Faws4_request&X-Amz-Date=2020-06-09T22%3A41%3A51.000Z&X-Amz-Expires=900\n" +
"host:cassandra\n\n" +
"host\n" +
"ddf250111597b3f35e51e649f59e3f8b30ff5b247166d709dc1b1e60bd927070"
actual := formCanonicalRequest("UserID-1", scope, buildStdInstant(), nonce)
assert.Equal(t, canonicalRequest, actual)
}
func TestDeriveSigningKey(t *testing.T) {
expected := "7fb139473f153aec1b05747b0cd5cd77a1186d22ae895a3a0128e699d72e1aba"
actual := deriveSigningKey(secret, buildStdInstant(), region)
assert.Equal(t, expected, hex.EncodeToString(actual))
}
func TestCreateSignature(t *testing.T) {
signingKey, _ := hex.DecodeString("7fb139473f153aec1b05747b0cd5cd77a1186d22ae895a3a0128e699d72e1aba")
scope := "20200609/us-west-2/cassandra/aws4_request"
canonicalRequest := "PUT\n" +
"/authenticate\n" +
"X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=UserID-1%2F20200609%2Fus-west-2%2Fcassandra%2Faws4_request&X-Amz-Date=2020-06-09T22%3A41%3A51.000Z&X-Amz-Expires=900\n" +
"host:cassandra\n\n" +
"host\n" +
"ddf250111597b3f35e51e649f59e3f8b30ff5b247166d709dc1b1e60bd927070"
actual := createSignature(canonicalRequest, buildStdInstant(), scope, signingKey)
expected := "7f3691c18a81b8ce7457699effbfae5b09b4e0714ab38c1292dbdf082c9ddd87"
assert.Equal(t, expected, hex.EncodeToString(actual))
}
func TestBuildSignedResponse(t *testing.T) {
actual := BuildSignedResponse(region, nonce, accessKeyId, secret, "", buildStdInstant())
expected := "signature=7f3691c18a81b8ce7457699effbfae5b09b4e0714ab38c1292dbdf082c9ddd87,access_key=UserID-1,amzdate=2020-06-09T22:41:51.000Z"
assert.Equal(t, expected, actual)
}
func TestBuildSignedResponseWithSessionToken(t *testing.T) {
sessionToken := "sess-token-1"
actual := BuildSignedResponse(region, nonce, accessKeyId, secret, sessionToken, buildStdInstant())
expected := "signature=7f3691c18a81b8ce7457699effbfae5b09b4e0714ab38c1292dbdf082c9ddd87,access_key=UserID-1,amzdate=2020-06-09T22:41:51.000Z,session_token=sess-token-1"
assert.Equal(t, expected, actual)
}
| 103 |
aws-toolkit-jetbrains | aws | Go | // NOTE this is flat because of VGO mapping
package main
import (
"github.com/aws/aws-lambda-go/lambda"
"strings"
)
func handler(request string) (string, error) {
return strings.ToUpper(request), nil
}
func main() {
lambda.Start(handler)
}
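// Behaviour sketch (added; not part of the original sample): the handler upper-cases its
// input, so a direct invocation looks like
//
//	out, err := handler("hello world") // out == "HELLO WORLD", err == nil
//
// In deployment, lambda.Start(handler) lets the Lambda runtime decode the incoming JSON
// string payload and pass it to handler.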
| 16 |
aws-toolkit-vscode | aws | Go | package main
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
)
var (
// DefaultHTTPGetAddress Default Address
DefaultHTTPGetAddress = "https://checkip.amazonaws.com"
// ErrNoIP No IP found in response
ErrNoIP = errors.New("No IP in HTTP response")
// ErrNon200Response non 200 status code in response
ErrNon200Response = errors.New("Non 200 Response found")
)
func handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
resp, err := http.Get(DefaultHTTPGetAddress)
if err != nil {
return events.APIGatewayProxyResponse{}, err
}
if resp.StatusCode != 200 {
return events.APIGatewayProxyResponse{}, ErrNon200Response
}
ip, err := ioutil.ReadAll(resp.Body)
if err != nil {
return events.APIGatewayProxyResponse{}, err
}
if len(ip) == 0 {
return events.APIGatewayProxyResponse{}, ErrNoIP
}
return events.APIGatewayProxyResponse{
Body: fmt.Sprintf("Hello, %v", string(ip)),
StatusCode: 200,
}, nil
}
func main() {
lambda.Start(handler)
}
| 52 |
aws-toolkit-vscode | aws | Go | package main
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/aws/aws-lambda-go/events"
)
func TestHandler(t *testing.T) {
t.Run("Unable to get IP", func(t *testing.T) {
DefaultHTTPGetAddress = "http://127.0.0.1:12345"
_, err := handler(events.APIGatewayProxyRequest{})
if err == nil {
t.Fatal("Error failed to trigger with an invalid request")
}
})
t.Run("Non 200 Response", func(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
}))
defer ts.Close()
DefaultHTTPGetAddress = ts.URL
_, err := handler(events.APIGatewayProxyRequest{})
if err != nil && err.Error() != ErrNon200Response.Error() {
t.Fatalf("Error failed to trigger with an invalid HTTP response: %v", err)
}
})
t.Run("Unable decode IP", func(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
}))
defer ts.Close()
DefaultHTTPGetAddress = ts.URL
_, err := handler(events.APIGatewayProxyRequest{})
if err == nil {
t.Fatal("Error failed to trigger with an invalid HTTP response")
}
})
t.Run("Successful Request", func(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
fmt.Fprintf(w, "127.0.0.1")
}))
defer ts.Close()
DefaultHTTPGetAddress = ts.URL
_, err := handler(events.APIGatewayProxyRequest{})
if err != nil {
t.Fatal("Everything should be ok")
}
})
}
| 65 |
aws-toolkit-vscode | aws | Go | package main
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
)
var (
// DefaultHTTPGetAddress Default Address
DefaultHTTPGetAddress = "https://checkip.amazonaws.com"
// ErrNoIP No IP found in response
ErrNoIP = errors.New("No IP in HTTP response")
// ErrNon200Response non 200 status code in response
ErrNon200Response = errors.New("Non 200 Response found")
)
func handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
resp, err := http.Get(DefaultHTTPGetAddress)
if err != nil {
return events.APIGatewayProxyResponse{}, err
}
if resp.StatusCode != 200 {
return events.APIGatewayProxyResponse{}, ErrNon200Response
}
ip, err := ioutil.ReadAll(resp.Body)
if err != nil {
return events.APIGatewayProxyResponse{}, err
}
if len(ip) == 0 {
return events.APIGatewayProxyResponse{}, ErrNoIP
}
return events.APIGatewayProxyResponse{
Body: fmt.Sprintf("Hello, %v", string(ip)),
StatusCode: 200,
}, nil
}
func main() {
lambda.Start(handler)
}
| 52 |
aws-toolkit-vscode | aws | Go | package main
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/aws/aws-lambda-go/events"
)
func TestHandler(t *testing.T) {
t.Run("Unable to get IP", func(t *testing.T) {
DefaultHTTPGetAddress = "http://127.0.0.1:12345"
_, err := handler(events.APIGatewayProxyRequest{})
if err == nil {
t.Fatal("Error failed to trigger with an invalid request")
}
})
t.Run("Non 200 Response", func(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
}))
defer ts.Close()
DefaultHTTPGetAddress = ts.URL
_, err := handler(events.APIGatewayProxyRequest{})
if err != nil && err.Error() != ErrNon200Response.Error() {
t.Fatalf("Error failed to trigger with an invalid HTTP response: %v", err)
}
})
t.Run("Unable decode IP", func(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(500)
}))
defer ts.Close()
DefaultHTTPGetAddress = ts.URL
_, err := handler(events.APIGatewayProxyRequest{})
if err == nil {
t.Fatal("Error failed to trigger with an invalid HTTP response")
}
})
t.Run("Successful Request", func(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
fmt.Fprintf(w, "127.0.0.1")
}))
defer ts.Close()
DefaultHTTPGetAddress = ts.URL
_, err := handler(events.APIGatewayProxyRequest{})
if err != nil {
t.Fatal("Everything should be ok")
}
})
}
| 65 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package main
import (
"encoding/json"
"fmt"
"io"
"math"
"net"
"os"
"runtime/pprof"
"sync/atomic"
"time"
"github.com/aws/aws-xray-daemon/pkg/bufferpool"
"github.com/aws/aws-xray-daemon/pkg/cfg"
"github.com/aws/aws-xray-daemon/pkg/cli"
"github.com/aws/aws-xray-daemon/pkg/conn"
"github.com/aws/aws-xray-daemon/pkg/logger"
"github.com/aws/aws-xray-daemon/pkg/processor"
"github.com/aws/aws-xray-daemon/pkg/profiler"
"github.com/aws/aws-xray-daemon/pkg/proxy"
"github.com/aws/aws-xray-daemon/pkg/ringbuffer"
"github.com/aws/aws-xray-daemon/pkg/socketconn"
"github.com/aws/aws-xray-daemon/pkg/socketconn/udp"
"github.com/aws/aws-xray-daemon/pkg/telemetry"
"github.com/aws/aws-xray-daemon/pkg/tracesegment"
"github.com/aws/aws-xray-daemon/pkg/util"
"github.com/aws/aws-sdk-go/aws"
log "github.com/cihub/seelog"
"github.com/shirou/gopsutil/mem"
)
var receiverCount int
var processorCount int
var config *cfg.Config
const protocolSeparator = "\n"
// Log Rotation Size is 50 MB
const logRotationSize int64 = 50 * 1024 * 1024
var udpAddress string
var tcpAddress string
var socketConnection string
var cpuProfile string
var memProfile string
var roleArn string
var receiveBufferSize int
var daemonProcessBufferMemoryMB int
var logFile string
var configFilePath string
var resourceARN string
var noMetadata bool
var version bool
var logLevel string
var regionFlag string
var proxyAddress string
// Daemon reads trace segments from the X-Ray daemon address and
// sends them to the X-Ray service.
type Daemon struct {
// Boolean channel, set to true if error is received reading from Socket.
done chan bool
// Ring buffer, used to store received segments.
std *ringbuffer.RingBuffer
// Counter for segments read by daemon.
count uint64
// Instance of socket connection.
sock socketconn.SocketConn
// Reference to buffer pool.
pool *bufferpool.BufferPool
// Reference to Processor.
processor *processor.Processor
// HTTP Proxy server
server *proxy.Server
}
func init() {
f, c := initCli("")
f.ParseFlags()
cfg.LogFile = logFile // storing log file passed through command line
// if a config file is passed via command-line argument, parse flags again with defaults taken from the config file
if configFilePath != "" {
cfg.ConfigValidation(configFilePath)
f, c = initCli(configFilePath)
f.ParseFlags()
}
if version {
fmt.Printf("AWS X-Ray daemon version: %v\n", cfg.Version)
os.Exit(0)
}
config = c
config.ProxyAddress = proxyAddress
}
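// Invocation sketch (added for clarity; the binary name and values are placeholders):
// the flags registered in initCli below translate into command lines such as
//
//	xray -c /etc/amazon/xray/cfg.yaml          // load a configuration file
//	xray -o -n us-west-2                       // local mode with an explicit region
//	xray -b 127.0.0.1:3000 -t 127.0.0.1:3000   // custom UDP and TCP bind addresses
//	xray -m 64 -l debug -f /var/log/xray.log   // 64 MB buffer memory, debug logs to a file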
func initCli(configFile string) (*cli.Flag, *cfg.Config) {
flag := cli.NewFlag("X-Ray Daemon")
cnfg := cfg.LoadConfig(configFile)
processorCount = cnfg.Concurrency
var (
defaultDaemonProcessSpaceLimitMB = cnfg.TotalBufferSizeMB
defaultLogPath = cnfg.Logging.LogPath
defaultLogLevel = cnfg.Logging.LogLevel
defaultUDPAddress = cnfg.Socket.UDPAddress
defaultTCPAddress = cnfg.Socket.TCPAddress
defaultRoleARN = cnfg.RoleARN
defaultLocalMode = cnfg.LocalMode
defaultRegion = cnfg.Region
defaultResourceARN = cnfg.ResourceARN
defaultProxyAddress = cnfg.ProxyAddress
)
socketConnection = "UDP"
regionFlag = defaultRegion
flag.StringVarF(&resourceARN, "resource-arn", "a", defaultResourceARN, "Amazon Resource Name (ARN) of the AWS resource running the daemon.")
flag.BoolVarF(&noMetadata, "local-mode", "o", *defaultLocalMode, "Don't check for EC2 instance metadata.")
flag.IntVarF(&daemonProcessBufferMemoryMB, "buffer-memory", "m", defaultDaemonProcessSpaceLimitMB, "Change the amount of memory in MB that buffers can use (minimum 3).")
flag.StringVarF(®ionFlag, "region", "n", defaultRegion, "Send segments to X-Ray service in a specific region.")
flag.StringVarF(&udpAddress, "bind", "b", defaultUDPAddress, "Overrides default UDP address (127.0.0.1:2000).")
flag.StringVarF(&tcpAddress, "bind-tcp", "t", defaultTCPAddress, "Overrides default TCP address (127.0.0.1:2000).")
flag.StringVarF(&roleArn, "role-arn", "r", defaultRoleARN, "Assume the specified IAM role to upload segments to a different account.")
flag.StringVarF(&configFilePath, "config", "c", "", "Load a configuration file from the specified path.")
flag.StringVarF(&logFile, "log-file", "f", defaultLogPath, "Output logs to the specified file path.")
flag.StringVarF(&logLevel, "log-level", "l", defaultLogLevel, "Log level, from most verbose to least: dev, debug, info, warn, error, prod (default).")
flag.StringVarF(&proxyAddress, "proxy-address", "p", defaultProxyAddress, "Proxy address through which to upload segments.")
flag.BoolVarF(&version, "version", "v", false, "Show AWS X-Ray daemon version.")
return flag, cnfg
}
func initDaemon(config *cfg.Config) *Daemon {
if logFile != "" {
var fileWriter io.Writer
if *config.Logging.LogRotation {
// Empty Archive path as code does not archive logs
apath := ""
maxSize := logRotationSize
// Keep one rolled over log file around
maxRolls := 1
archiveExplode := false
fileWriter, _ = log.NewRollingFileWriterSize(logFile, 0, apath, maxSize, maxRolls, 0, archiveExplode)
} else {
fileWriter, _ = log.NewFileWriter(logFile)
}
logger.LoadLogConfig(fileWriter, config, logLevel)
} else {
newWriter, _ := log.NewConsoleWriter()
logger.LoadLogConfig(newWriter, config, logLevel)
}
defer log.Flush()
log.Infof("Initializing AWS X-Ray daemon %v", cfg.Version)
parameterConfig := cfg.ParameterConfigValue
receiverCount = parameterConfig.ReceiverRoutines
receiveBufferSize = parameterConfig.Socket.BufferSizeKB * 1024
cpuProfile = os.Getenv("XRAY_DAEMON_CPU_PROFILE")
memProfile = os.Getenv("XRAY_DAEMON_MEMORY_PROFILE")
profiler.EnableCPUProfile(&cpuProfile)
defer pprof.StopCPUProfile()
var sock socketconn.SocketConn
sock = udp.New(udpAddress)
memoryLimit := evaluateBufferMemory(daemonProcessBufferMemoryMB)
log.Infof("Using buffer memory limit of %v MB", memoryLimit)
buffers, err := bufferpool.GetPoolBufferCount(memoryLimit, receiveBufferSize)
if err != nil {
log.Errorf("%v", err)
os.Exit(1)
}
log.Infof("%v segment buffers allocated", buffers)
bufferPool := bufferpool.Init(buffers, receiveBufferSize)
std := ringbuffer.New(buffers, bufferPool)
if config.Endpoint != "" {
log.Debugf("Using Endpoint read from Config file: %s", config.Endpoint)
}
awsConfig, session := conn.GetAWSConfigSession(&conn.Conn{}, config, roleArn, regionFlag, noMetadata)
log.Infof("Using region: %v", aws.StringValue(awsConfig.Region))
log.Debugf("ARN of the AWS resource running the daemon: %v", resourceARN)
telemetry.Init(awsConfig, session, resourceARN, noMetadata)
// If the calculated number of buffers is lower than our default, use the calculated one. Otherwise, use the default value.
parameterConfig.Processor.BatchSize = util.GetMinIntValue(parameterConfig.Processor.BatchSize, buffers)
config.Socket.TCPAddress = tcpAddress // assign final tcp address either through config file or cmd line
// Create proxy http server
server, err := proxy.NewServer(config, awsConfig, session)
if err != nil {
log.Errorf("Unable to start http proxy server: %v", err)
os.Exit(1)
}
daemon := &Daemon{
done: make(chan bool),
std: std,
pool: bufferPool,
count: 0,
sock: sock,
server: server,
processor: processor.New(awsConfig, session, processorCount, std, bufferPool, parameterConfig),
}
return daemon
}
func runDaemon(daemon *Daemon) {
// Start http server for proxying requests to xray
go daemon.server.Serve()
for i := 0; i < receiverCount; i++ {
go daemon.poll()
}
}
func (d *Daemon) close() {
for i := 0; i < receiverCount; i++ {
<-d.done
}
// Signal routines to finish
// This will push telemetry and customer segments in parallel
d.std.Close()
telemetry.T.Quit <- true
<-d.processor.Done
<-telemetry.T.Done
profiler.MemSnapShot(&memProfile)
log.Debugf("Trace segment: received: %d, truncated: %d, processed: %d", atomic.LoadUint64(&d.count), d.std.TruncatedCount(), d.processor.ProcessedCount())
log.Debugf("Shutdown finished. Current epoch in nanoseconds: %v", time.Now().UnixNano())
}
func (d *Daemon) stop() {
d.sock.Close()
d.server.Close()
}
// Returns number of bytes read from socket connection.
func (d *Daemon) read(buf *[]byte) int {
bufVal := *buf
rlen, err := d.sock.Read(bufVal)
switch err := err.(type) {
case net.Error:
if !err.Temporary() {
d.done <- true
return -1
}
log.Errorf("daemon: net: err: %v", err)
return 0
case error:
log.Errorf("daemon: socket: err: %v", err)
return 0
}
return rlen
}
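// Datagram sketch (added; the header shape is an assumption based on the split and
// validation performed in poll below): each UDP message is a JSON header, the "\n"
// protocol separator, then the segment document, e.g.
//
//	{"format": "json", "version": 1}
//	{"trace_id": "1-...", "id": "...", "name": "..."}
//
// Messages with a missing or invalid header, or an empty payload, are rejected and
// counted in telemetry.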
func (d *Daemon) poll() {
separator := []byte(protocolSeparator)
fallBackBuffer := make([]byte, receiveBufferSize)
splitBuf := make([][]byte, 2)
for {
bufPointer := d.pool.Get()
fallbackPointerUsed := false
if bufPointer == nil {
log.Debug("Pool does not have any buffer.")
bufPointer = &fallBackBuffer
fallbackPointerUsed = true
}
rlen := d.read(bufPointer)
if rlen > 0 {
telemetry.T.SegmentReceived(1)
}
if rlen == 0 {
if !fallbackPointerUsed {
d.pool.Return(bufPointer)
}
continue
}
if fallbackPointerUsed {
log.Warn("Segment dropped. Consider increasing memory limit")
telemetry.T.SegmentSpillover(1)
continue
} else if rlen == -1 {
return
}
buf := *bufPointer
bufMessage := buf[0:rlen]
slices := util.SplitHeaderBody(&bufMessage, &separator, &splitBuf)
if len(slices[1]) == 0 {
log.Warnf("Missing header or segment: %s", string(slices[0]))
d.pool.Return(bufPointer)
telemetry.T.SegmentRejected(1)
continue
}
header := slices[0]
payload := slices[1]
headerInfo := tracesegment.Header{}
json.Unmarshal(header, &headerInfo)
switch headerInfo.IsValid() {
case true:
default:
log.Warnf("Invalid header: %s", string(header))
d.pool.Return(bufPointer)
telemetry.T.SegmentRejected(1)
continue
}
ts := &tracesegment.TraceSegment{
Raw: &payload,
PoolBuf: bufPointer,
}
atomic.AddUint64(&d.count, 1)
d.std.Send(ts)
}
}
func evaluateBufferMemory(cliBufferMemory int) int {
var bufferMemoryMB int
if cliBufferMemory > 0 {
bufferMemoryMB = cliBufferMemory
} else {
vm, err := mem.VirtualMemory()
if err != nil {
log.Errorf("%v", err)
os.Exit(1)
}
bufferMemoryLimitPercentageOfTotal := 0.01
totalBytes := vm.Total
bufferMemoryMB = int(math.Floor(bufferMemoryLimitPercentageOfTotal * float64(totalBytes) / float64(1024*1024)))
}
if bufferMemoryMB < 3 {
log.Error("Not enough Buffers Memory Allocated. Min Buffers Memory required: 3 MB.")
os.Exit(1)
}
return bufferMemoryMB
}
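// Worked example (added): with no buffer-memory flag on a host with 8 GiB of RAM, the
// limit is 1% of total memory: floor(0.01 * 8192 MB) = 81 MB. Anything below 3 MB is
// treated as a fatal configuration error above.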
| 361 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
// +build !windows
package main
import (
"os"
"os/signal"
"syscall"
"time"
log "github.com/cihub/seelog"
)
func (d *Daemon) blockSignalReceived() {
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM, os.Kill)
s := <-sigs
log.Debugf("Shutdown Initiated. Current epoch in nanoseconds: %v", time.Now().UnixNano())
log.Infof("Got shutdown signal: %v", s)
d.stop()
}
func main() {
d := initDaemon(config)
defer d.close()
go func() {
d.blockSignalReceived()
}()
runDaemon(d)
}
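// Shutdown flow sketch (added for clarity): a SIGINT/SIGTERM makes blockSignalReceived
// call d.stop(), which closes the UDP socket and the proxy server. Each receiver
// goroutine then hits a non-temporary read error and signals d.done; the deferred
// d.close() in main waits for every receiver, closes the ring buffer, stops telemetry,
// waits for the processor to drain, and logs the final counters.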
| 40 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
// +build windows
package main
import (
"time"
"golang.org/x/sys/windows/svc"
)
const serviceName = "AmazonX-RayDaemon"
func main() {
svc.Run(serviceName, &TracingDaemonService{})
}
// Structure for X-Ray daemon as a service.
type TracingDaemonService struct{}
// Execute runs xray as a Windows service, implementing golang.org/x/sys/windows/svc#Handler.
func (a *TracingDaemonService) Execute(args []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) {
// notify service controller status is now StartPending
s <- svc.Status{State: svc.StartPending}
// start service
d := initDaemon(config)
// Start a routine to monitor that all initiated channels/routines are closed.
// This is required on Windows because the Windows service waits for the process to finish using the infinite loop below.
go d.close()
runDaemon(d)
// update service status to Running
const acceptCmds = svc.AcceptStop | svc.AcceptShutdown
s <- svc.Status{State: svc.Running, Accepts: acceptCmds}
loop:
// using an infinite loop to wait for ChangeRequests
for {
// block and wait for ChangeRequests
c := <-r
// handle ChangeRequest, svc.Pause is not supported
switch c.Cmd {
case svc.Interrogate:
s <- c.CurrentStatus
// Testing deadlock from https://code.google.com/p/winsvc/issues/detail?id=4
time.Sleep(100 * time.Millisecond)
s <- c.CurrentStatus
case svc.Stop, svc.Shutdown:
break loop
default:
continue loop
}
}
s <- svc.Status{State: svc.StopPending}
d.stop()
return false, 0
}
| 67 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package bufferpool
import (
"errors"
"math"
"sync"
)
// BufferPool is a structure for storing trace segments.
type BufferPool struct {
// Slice of byte slices to store trace segments.
Buffers []*[]byte
lock sync.Mutex
// Map to track available buffers in the pool.
bufferHeadHash map[*byte]bool
}
// Init initializes new BufferPool with bufferLimit buffers, each of bufferSize.
func Init(bufferLimit int, bufferSize int) *BufferPool {
bufferHeadHash := make(map[*byte]bool)
bufferArray := make([]*[]byte, bufferLimit)
for i := 0; i < bufferLimit; i++ {
buf := make([]byte, bufferSize)
bufferArray[i] = &buf
bufferHeadHash[getBufferPointer(&buf)] = true
}
bufferPool := BufferPool{
Buffers: bufferArray,
lock: sync.Mutex{},
bufferHeadHash: bufferHeadHash,
}
return &bufferPool
}
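// Usage sketch (added; sizes are arbitrary examples):
//
//	pool := Init(1000, 256*1024) // 1000 buffers of 256 KB each
//	buf := pool.Get()            // nil once the pool is exhausted
//	if buf != nil {
//		// ... fill *buf ...
//		pool.Return(buf) // duplicate returns are rejected via bufferHeadHash
//	}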
// Get returns available buffer of BufferPool b, nil if not any.
func (b *BufferPool) Get() *[]byte {
b.lock.Lock()
buffers := b.Buffers
buffersLen := len(buffers)
var buf *[]byte
if buffersLen > 0 {
buf = buffers[buffersLen-1]
b.Buffers = buffers[:buffersLen-1]
delete(b.bufferHeadHash, getBufferPointer(buf))
}
b.lock.Unlock()
return buf
}
// Return adds buffer buf to BufferPool b.
func (b *BufferPool) Return(buf *[]byte) {
b.lock.Lock()
// Rejecting buffer if already in pool
if b.isBufferAlreadyInPool(buf) {
b.lock.Unlock()
return
}
buffers := b.Buffers
buffersCap := cap(buffers)
buffersLen := len(buffers)
if buffersLen < buffersCap {
buffers = append(buffers, buf)
b.Buffers = buffers
b.bufferHeadHash[getBufferPointer(buf)] = true
}
b.lock.Unlock()
}
// CurrentBuffersLen returns length of buffers.
func (b *BufferPool) CurrentBuffersLen() int {
b.lock.Lock()
len := len(b.Buffers)
b.lock.Unlock()
return len
}
func getBufferPointer(buf *[]byte) *byte {
bufVal := *buf
// Use the first element as a pointer to the whole array, since a Go slice is backed by a contiguous array.
// This might fail if someone returns a slice of the original buffer that was fetched.
return &bufVal[0]
}
func (b *BufferPool) isBufferAlreadyInPool(buf *[]byte) bool {
bufPointer := getBufferPointer(buf)
_, ok := b.bufferHeadHash[bufPointer]
return ok
}
// GetPoolBufferCount returns number of buffers that can fit in the given buffer pool limit
// where each buffer is of size receiveBufferSize.
func GetPoolBufferCount(bufferPoolLimitMB int, receiveBufferSize int) (int, error) {
if receiveBufferSize <= 0 {
return 0, errors.New("receive buffer size cannot be less than or equal to zero")
}
if bufferPoolLimitMB <= 0 {
return 0, errors.New("process limit MB cannot be less than or equal to zero")
}
processLimitBytes := bufferPoolLimitMB * 1024 * 1024
return int(math.Floor(float64(processLimitBytes / receiveBufferSize))), nil
}
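// Worked example (added): a 250 MB pool limit with 256 KB receive buffers yields
// floor((250 * 1024 * 1024) / (256 * 1024)) = 1000 buffers.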
| 112 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package bufferpool
import (
"math"
"testing"
"github.com/stretchr/testify/assert"
)
type bufferPoolTestCase struct {
processorSizeMB int
bufferSizeKB int
}
func TestBufferPoolGet(t *testing.T) {
testCases := []int{10, 200, 1000, 5000, 10000}
for _, bufferLimit := range testCases {
bufferSize := 256 * 1024
bufferPool := Init(bufferLimit, bufferSize)
// First Fetch
buf := bufferPool.Get()
assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit-1)
assert.NotNil(t, buf)
// Try to get all. Minus 1 due to fetch above
for i := 0; i < bufferLimit-1; i++ {
buf = bufferPool.Get()
assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit-1-(i+1))
assert.NotNil(t, buf)
}
// No more buffer left hence returned nil
buf = bufferPool.Get()
assert.Nil(t, buf)
assert.EqualValues(t, bufferPool.CurrentBuffersLen(), 0)
}
}
func TestBufferReturn(t *testing.T) {
bufferLimit := 10
bufferSize := 256 * 1024
bufferPool := Init(bufferLimit, bufferSize)
buf := make([]byte, bufferSize)
bufferPool.Return(&buf)
// This return should be rejected as pool is already full
assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit)
// Fetch one and return buffer
bufferPool.Get()
assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit-1)
bufferPool.Return(&buf)
assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit)
// Fetch two and return the same buffer twice; the duplicate return should be rejected
returnedBuf1 := bufferPool.Get()
returnedBuf2 := bufferPool.Get()
assert.NotNil(t, returnedBuf1)
assert.NotNil(t, returnedBuf2)
assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit-2)
bufferPool.Return(returnedBuf1)
bufferPool.Return(returnedBuf1)
assert.EqualValues(t, bufferPool.CurrentBuffersLen(), bufferLimit-1)
}
func TestBufferGetMultipleRoutine(t *testing.T) {
testCases := []int{100, 1000, 2132}
for _, bufferLimit := range testCases {
bufferSize := 256 * 1024
routines := 5
pool := Init(bufferLimit, bufferSize)
routineFunc := func(c chan int, pool *BufferPool) {
count := 0
for {
buf := pool.Get()
if buf == nil {
break
}
count++
}
c <- count
}
chans := make([]chan int, routines)
for i := 0; i < routines; i++ {
c := make(chan int)
chans[i] = c
go routineFunc(c, pool)
}
totalFetched := 0
for i := 0; i < routines; i++ {
bufFetched := <-chans[i]
totalFetched += bufFetched
}
assert.EqualValues(t, bufferLimit, totalFetched)
buf := pool.Get()
assert.Nil(t, buf)
}
}
func TestGetPoolBufferCount(t *testing.T) {
testCases := []bufferPoolTestCase{
{processorSizeMB: 100, bufferSizeKB: 256},
{processorSizeMB: 16, bufferSizeKB: 125},
{processorSizeMB: 16, bufferSizeKB: 256},
{processorSizeMB: 250, bufferSizeKB: 512},
{processorSizeMB: 5, bufferSizeKB: 50},
}
for _, testCase := range testCases {
processSizeMB := testCase.processorSizeMB
bufferSize := testCase.bufferSizeKB
bufferCount, err := GetPoolBufferCount(processSizeMB, bufferSize)
assert.Nil(t, err)
expected := int(math.Floor(float64((processSizeMB * 1024 * 1024) / bufferSize)))
assert.EqualValues(t, expected, bufferCount)
}
}
func TestGetPoolBufferCountNegativeProcessorSize(t *testing.T) {
bufferCount, err := GetPoolBufferCount(-123, 24512)
assert.EqualValues(t, 0, bufferCount)
assert.NotNil(t, err)
assert.EqualValues(t, err.Error(), "process limit MB cannot be less than or equal to zero")
}
func TestGetPoolBufferCountNegativeBufferSize(t *testing.T) {
bufferCount, err := GetPoolBufferCount(123, -24512)
assert.EqualValues(t, 0, bufferCount)
assert.NotNil(t, err)
assert.EqualValues(t, err.Error(), "receive buffer size cannot be less than or equal to zero")
}
| 159 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package cfg
import (
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"github.com/aws/aws-xray-daemon/pkg/util"
"gopkg.in/yaml.v2"
log "github.com/cihub/seelog"
)
// Version number of the X-Ray daemon.
var Version = "unknown"
var cfgFileVersions = [...]int{1, 2} // Supported versions of cfg.yaml file.
var configLocations = []string{
"/etc/amazon/xray/cfg.yaml",
"cfg.yaml",
"github.com/aws/aws-xray-daemon/pkg/cfg.yaml",
}
// LogFile represents log file passed through command line argument.
var LogFile string
// LogFormat defines format for logger.
var LogFormat = "%Date(2006-01-02T15:04:05Z07:00) [%Level] %Msg%n"
// Config defines configuration structure for cli parameters.
type Config struct {
// Maximum buffer size in MB (minimum 3). Choose 0 to use 1% of host memory.
TotalBufferSizeMB int `yaml:"TotalBufferSizeMB"`
// Maximum number of concurrent calls to AWS X-Ray to upload segment documents.
Concurrency int `yaml:"Concurrency"`
// X-Ray service endpoint to which the daemon sends segment documents.
Endpoint string `yaml:"Endpoint"`
// Send segments to AWS X-Ray service in a specific region.
Region string `yaml:"Region"`
Socket struct {
// Address and port on which the daemon listens for UDP packets containing segment documents.
UDPAddress string `yaml:"UDPAddress"`
TCPAddress string `yaml:"TCPAddress"`
} `yaml:"Socket"`
ProxyServer struct {
IdleConnTimeout int
MaxIdleConnsPerHost int
MaxIdleConns int
}
// Structure for logging.
Logging struct {
// LogRotation, if true, will rotate log after 50 MB size of current log file.
LogRotation *bool `yaml:"LogRotation"`
// The log level, from most verbose to least: dev, debug, info, warn, error, prod (default).
LogLevel string `yaml:"LogLevel"`
// Logs to the specified file path.
LogPath string `yaml:"LogPath"`
} `yaml:"Logging"`
// Local mode to skip EC2 instance metadata check.
LocalMode *bool `yaml:"LocalMode"`
// Amazon Resource Name (ARN) of the AWS resource running the daemon.
ResourceARN string `yaml:"ResourceARN"`
// IAM role to upload segments to a different account.
RoleARN string `yaml:"RoleARN"`
// Enable or disable TLS certificate verification.
NoVerifySSL *bool `yaml:"NoVerifySSL"`
// Upload segments to AWS X-Ray through a proxy.
ProxyAddress string `yaml:"ProxyAddress"`
// Daemon configuration file format version.
Version int `yaml:"Version"`
}
// DefaultConfig returns default configuration for X-Ray daemon.
func DefaultConfig() *Config {
return &Config{
TotalBufferSizeMB: 0,
Concurrency: 8,
Endpoint: "",
Region: "",
Socket: struct {
UDPAddress string `yaml:"UDPAddress"`
TCPAddress string `yaml:"TCPAddress"`
}{
UDPAddress: "127.0.0.1:2000",
TCPAddress: "127.0.0.1:2000",
},
ProxyServer: struct {
IdleConnTimeout int
MaxIdleConnsPerHost int
MaxIdleConns int
}{
IdleConnTimeout: 30,
MaxIdleConnsPerHost: 2,
MaxIdleConns: 0,
},
Logging: struct {
LogRotation *bool `yaml:"LogRotation"`
LogLevel string `yaml:"LogLevel"`
LogPath string `yaml:"LogPath"`
}{
LogRotation: util.Bool(true),
LogLevel: "prod",
LogPath: "",
},
LocalMode: util.Bool(false),
ResourceARN: "",
RoleARN: "",
NoVerifySSL: util.Bool(false),
ProxyAddress: "",
Version: 1,
}
}
// ParameterConfig is a configuration used by daemon.
type ParameterConfig struct {
SegmentChannel struct {
// Size of trace segments channel.
Std int
}
Socket struct {
// Socket buffer size.
BufferSizeKB int
}
// Number of daemon.poll() goroutines to spawn.
ReceiverRoutines int
Processor struct {
// Size of the batch segments processed by Processor.
BatchSize int
// Idle timeout in milliseconds used while sending batch segments.
IdleTimeoutMillisecond int
// MaxIdleConnPerHost, controls the maximum idle
// (keep-alive) HTTP connections to keep per-host.
MaxIdleConnPerHost int
// Used to set Http client timeout in seconds.
RequestTimeout int
BatchProcessorQueueSize int
}
}
// ParameterConfigValue returns instance of ParameterConfig, initialized with default values.
var ParameterConfigValue = &ParameterConfig{
SegmentChannel: struct {
Std int
}{
Std: 250,
},
Socket: struct {
BufferSizeKB int
}{
BufferSizeKB: 64,
},
ReceiverRoutines: 2,
Processor: struct {
BatchSize int
IdleTimeoutMillisecond int
MaxIdleConnPerHost int
RequestTimeout int
BatchProcessorQueueSize int
}{
BatchSize: 50,
IdleTimeoutMillisecond: 1000,
MaxIdleConnPerHost: 8,
RequestTimeout: 2,
BatchProcessorQueueSize: 20,
},
}
// LoadConfig returns configuration from a valid configFile else default configuration.
func LoadConfig(configFile string) *Config {
if configFile == "" {
for _, val := range configLocations {
if _, err := os.Stat(val); os.IsNotExist(err) {
continue
}
return merge(val)
}
return DefaultConfig()
}
return merge(configFile)
}
func loadConfigFromFile(configPath string) *Config {
bytes, err := ioutil.ReadFile(configPath)
if err != nil {
errorAndExit("", err)
}
return loadConfigFromBytes(bytes)
}
func loadConfigFromBytes(bytes []byte) *Config {
c := &Config{}
err := yaml.Unmarshal(bytes, c)
if err != nil {
errorAndExit("", err)
}
return c
}
func errorAndExit(serr string, err error) {
createLogWritersAndLog(serr, err)
rescueStderr := os.Stderr
_, w, _ := os.Pipe()
os.Stderr = w
w.Close()
os.Stderr = rescueStderr
os.Exit(1)
}
// createLogWritersAndLog writes to stderr and provided log file.
func createLogWritersAndLog(serr string, err error) {
var stderrWriter = os.Stderr
var writer io.Writer
stderrLogger, _ := log.LoggerFromWriterWithMinLevelAndFormat(stderrWriter, log.ErrorLvl, LogFormat)
writeToLogger(stderrLogger, serr, err)
if LogFile == "" {
return
}
writer, _ = log.NewFileWriter(LogFile)
fileLogger, _ := log.LoggerFromWriterWithMinLevelAndFormat(writer, log.ErrorLvl, LogFormat)
writeToLogger(fileLogger, serr, err)
}
func writeToLogger(fileLogger log.LoggerInterface, serr string, err error) {
log.ReplaceLogger(fileLogger)
if serr != "" {
log.Errorf("%v", serr)
} else if err != nil {
log.Errorf("Error occur when using config flag: %v", err)
}
}
func configFlagArray(config yaml.MapSlice) []string {
var configArray []string
for i := 0; i < len(config); i++ {
if config[i].Value == nil || reflect.TypeOf(config[i].Value).String() != "yaml.MapSlice" {
configArray = append(configArray, fmt.Sprint(config[i].Key))
} else {
configItem := yaml.MapSlice{}
configItem = config[i].Value.(yaml.MapSlice)
for j := 0; j < len(configItem); j++ {
configArray = append(configArray, fmt.Sprintf("%v.%v", config[i].Key, configItem[j].Key))
}
}
}
return configArray
}
func validConfigArray() []string {
validConfig := yaml.MapSlice{}
validConfigBytes, verr := yaml.Marshal(DefaultConfig())
if verr != nil {
errorAndExit("", verr)
}
yerr := yaml.Unmarshal(validConfigBytes, &validConfig)
if yerr != nil {
errorAndExit("", yerr)
}
return configFlagArray(validConfig)
}
func userConfigArray(configPath string) []string {
fileBytes, rerr := ioutil.ReadFile(configPath)
if rerr != nil {
errorAndExit("", rerr)
}
userConfig := yaml.MapSlice{}
uerr := yaml.Unmarshal(fileBytes, &userConfig)
if uerr != nil {
errorAndExit("", uerr)
}
return configFlagArray(userConfig)
}
// ConfigValidation validates provided configuration file, invalid configuration will exit the process.
func ConfigValidation(configPath string) {
validConfigArray := validConfigArray()
userConfigArray := userConfigArray(configPath)
notSupportFlag := []string{"Profile.CPU", "Profile.Memory", "Socket.BufferSizeKB", "Logging.LogFormat", "Processor.BatchProcessorQueueSize"}
needMigrateFlag := []string{"LogRotation", "Processor.Region", "Processor.Endpoint", "Processor.Routine", "MemoryLimit"}
for i := 0; i < len(userConfigArray); i++ {
if !contains(userConfigArray, "Version") {
errorAndExit("Config Version is missing. Use X-Ray Daemon Config Migration Script to update the config file. Please refer to AWS X-Ray Documentation for more information.", nil)
}
if !contains(validConfigArray, userConfigArray[i]) {
if contains(notSupportFlag, userConfigArray[i]) {
errorMessage := fmt.Sprintf("%v flag is not supported any more. Please refer to AWS X-Ray Documentation for more information.", userConfigArray[i])
errorAndExit(errorMessage, nil)
} else if contains(needMigrateFlag, userConfigArray[i]) {
errorMessage := fmt.Sprintf("%v flag is not supported. Use X-Ray Daemon Config Migration Script to update the config file. Please refer to AWS X-Ray Documentation for more information.", userConfigArray[i])
errorAndExit(errorMessage, nil)
} else {
errorMessage := fmt.Sprintf("%v flag is invalid. Please refer to AWS X-Ray Documentation for more information.", userConfigArray[i])
errorAndExit(errorMessage, nil)
}
}
}
}
func contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
func merge(configFile string) *Config {
userConfig := loadConfigFromFile(configFile)
versionMatch := false
for i := 0; i < len(cfgFileVersions); i++ {
if cfgFileVersions[i] == userConfig.Version {
versionMatch = true
break
}
}
if !versionMatch {
errorAndExit("Config Version Setting is not correct. Use X-Ray Daemon Config Migration Script to update the config file. Please refer to AWS X-Ray Documentation for more information.", nil)
}
userConfig.Socket.UDPAddress = getStringValue(userConfig.Socket.UDPAddress, DefaultConfig().Socket.UDPAddress)
userConfig.Socket.TCPAddress = getStringValue(userConfig.Socket.TCPAddress, DefaultConfig().Socket.TCPAddress)
userConfig.ProxyServer.IdleConnTimeout = DefaultConfig().ProxyServer.IdleConnTimeout
userConfig.ProxyServer.MaxIdleConnsPerHost = DefaultConfig().ProxyServer.MaxIdleConnsPerHost
userConfig.ProxyServer.MaxIdleConns = DefaultConfig().ProxyServer.MaxIdleConns
userConfig.TotalBufferSizeMB = getIntValue(userConfig.TotalBufferSizeMB, DefaultConfig().TotalBufferSizeMB)
userConfig.ResourceARN = getStringValue(userConfig.ResourceARN, DefaultConfig().ResourceARN)
userConfig.RoleARN = getStringValue(userConfig.RoleARN, DefaultConfig().RoleARN)
userConfig.Concurrency = getIntValue(userConfig.Concurrency, DefaultConfig().Concurrency)
userConfig.Endpoint = getStringValue(userConfig.Endpoint, DefaultConfig().Endpoint)
userConfig.Region = getStringValue(userConfig.Region, DefaultConfig().Region)
userConfig.Logging.LogRotation = getBoolValue(userConfig.Logging.LogRotation, DefaultConfig().Logging.LogRotation)
userConfig.Logging.LogLevel = getStringValue(userConfig.Logging.LogLevel, DefaultConfig().Logging.LogLevel)
userConfig.Logging.LogPath = getStringValue(userConfig.Logging.LogPath, DefaultConfig().Logging.LogPath)
userConfig.NoVerifySSL = getBoolValue(userConfig.NoVerifySSL, DefaultConfig().NoVerifySSL)
userConfig.LocalMode = getBoolValue(userConfig.LocalMode, DefaultConfig().LocalMode)
userConfig.ProxyAddress = getStringValue(userConfig.ProxyAddress, DefaultConfig().ProxyAddress)
return userConfig
}
func getStringValue(configValue string, defaultValue string) string {
if configValue == "" {
return defaultValue
}
return configValue
}
func getIntValue(configValue, defaultValue int) int {
if configValue == 0 {
return defaultValue
}
return configValue
}
func getBoolValue(configValue, defaultValue *bool) *bool {
if configValue == nil {
return defaultValue
}
return configValue
}
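// Minimal cfg.yaml sketch (added; field names come from the yaml tags on Config above,
// values are illustrative only):
//
//	TotalBufferSizeMB: 16
//	Concurrency: 8
//	Region: "us-west-2"
//	Socket:
//	  UDPAddress: "127.0.0.1:2000"
//	  TCPAddress: "127.0.0.1:2000"
//	Logging:
//	  LogRotation: true
//	  LogLevel: "prod"
//	  LogPath: ""
//	LocalMode: false
//	Version: 2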
| 398 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package cfg
import (
"errors"
"io/ioutil"
"os"
"os/exec"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
var errFile = "error.log"
var tstFileName = "test_config.yaml"
var tstFilePath string
var version2 = 2
var version1 = 1
func setupTestCase() {
LogFile = errFile
}
func tearTestCase() {
LogFile = ""
os.Remove(errFile)
}
func setupTestFile(cnfg string) (string, error) {
goPath := os.Getenv("PWD")
if goPath == "" {
panic("GOPATH not set")
}
tstFilePath = goPath + "/" + tstFileName
f, err := os.Create(tstFilePath)
if err != nil {
panic(err)
}
f.WriteString(cnfg)
f.Close()
return goPath, err
}
func clearTestFile() {
os.Remove(tstFilePath)
}
func TestLoadConfigFromBytes(t *testing.T) {
configString :=
`Socket:
UDPAddress: "127.0.0.1:2000"
TCPAddress: "127.0.0.1:2000"
TotalBufferSizeMB: 16
Region: "us-east-1"
Endpoint: "https://xxxx.xxxx.com"
ResourceARN: ""
RoleARN: ""
Concurrency: 8
Logging:
LogRotation: true
LogPath: ""
LogLevel: "prod"
NoVerifySSL: false
LocalMode: false
ProxyAddress: ""
Version: 2`
c := loadConfigFromBytes([]byte(configString))
assert.EqualValues(t, c.Socket.UDPAddress, "127.0.0.1:2000")
assert.EqualValues(t, c.Socket.TCPAddress, "127.0.0.1:2000")
assert.EqualValues(t, c.TotalBufferSizeMB, 16)
assert.EqualValues(t, c.Region, "us-east-1")
assert.EqualValues(t, c.Endpoint, "https://xxxx.xxxx.com")
assert.EqualValues(t, c.ResourceARN, "")
assert.EqualValues(t, c.RoleARN, "")
assert.EqualValues(t, c.Concurrency, 8)
assert.EqualValues(t, c.Logging.LogLevel, "prod")
assert.EqualValues(t, c.Logging.LogPath, "")
assert.EqualValues(t, *c.Logging.LogRotation, true)
assert.EqualValues(t, *c.NoVerifySSL, false)
assert.EqualValues(t, *c.LocalMode, false)
assert.EqualValues(t, c.ProxyAddress, "")
assert.EqualValues(t, c.Version, version2)
}
func TestLoadConfigFromBytesTypeError(t *testing.T) {
configString :=
`TotalBufferSizeMB: NotExist`
// Only run the failing part when a specific env variable is set
if os.Getenv("Test_Bytes") == "1" {
loadConfigFromBytes([]byte(configString))
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestLoadConfigFromBytesTypeError")
cmd.Env = append(os.Environ(), "Test_Bytes=1")
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
// Check that the program exited
err := cmd.Wait()
if e, ok := err.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", err)
}
}
func TestLoadConfigFromFile(t *testing.T) {
configString :=
`Socket:
UDPAddress: "127.0.0.1:2000"
TCPAddress: "127.0.0.1:2000"
TotalBufferSizeMB: 16
Region: "us-east-1"
Endpoint: "https://xxxx.xxxx.com"
ResourceARN: ""
RoleARN: ""
Concurrency: 8
Logging:
LogRotation: true
LogPath: ""
LogLevel: "prod"
NoVerifySSL: false
LocalMode: false
ProxyAddress: ""
Version: 2`
setupTestFile(configString)
c := loadConfigFromFile(tstFilePath)
assert.EqualValues(t, c.Socket.UDPAddress, "127.0.0.1:2000")
assert.EqualValues(t, c.TotalBufferSizeMB, 16)
assert.EqualValues(t, c.Region, "us-east-1")
assert.EqualValues(t, c.Endpoint, "https://xxxx.xxxx.com")
assert.EqualValues(t, c.ResourceARN, "")
assert.EqualValues(t, c.RoleARN, "")
assert.EqualValues(t, c.Concurrency, 8)
assert.EqualValues(t, c.Logging.LogLevel, "prod")
assert.EqualValues(t, c.Logging.LogPath, "")
assert.EqualValues(t, *c.Logging.LogRotation, true)
assert.EqualValues(t, *c.NoVerifySSL, false)
assert.EqualValues(t, *c.LocalMode, false)
assert.EqualValues(t, c.ProxyAddress, "")
assert.EqualValues(t, c.Version, version2)
clearTestFile()
}
func TestLoadConfigFromFileDoesNotExist(t *testing.T) {
setupTestCase()
testFile := "test_config_does_not_exist_121213.yaml"
// Only run the failing part when a specific env variable is set
if os.Getenv("Test_Bytes") == "1" {
loadConfigFromFile(testFile)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestLoadConfigFromFileDoesNotExist")
cmd.Env = append(os.Environ(), "Test_Bytes=1")
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
// Check that the program exited
err := cmd.Wait()
if e, ok := err.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", err)
}
tearTestCase()
}
func TestLoadConfigVersion1(t *testing.T) {
configString :=
`Socket:
UDPAddress: "127.0.0.1:2000"
TotalBufferSizeMB: 16
Region: "us-east-1"
Endpoint: "https://xxxx.xxxx.com"
ResourceARN: ""
RoleARN: ""
Concurrency: 8
Logging:
LogRotation: true
LogPath: ""
LogLevel: "prod"
NoVerifySSL: false
LocalMode: false
ProxyAddress: ""
Version: 1`
setupTestFile(configString)
configLocations = append([]string{tstFilePath}, configLocations...)
c := LoadConfig("")
assert.EqualValues(t, c.Socket.UDPAddress, "127.0.0.1:2000")
assert.EqualValues(t, c.Socket.TCPAddress, "127.0.0.1:2000") // TCP address defaults for V1 cfg.yaml
assert.EqualValues(t, c.TotalBufferSizeMB, 16)
assert.EqualValues(t, c.Region, "us-east-1")
assert.EqualValues(t, c.Endpoint, "https://xxxx.xxxx.com")
assert.EqualValues(t, c.ResourceARN, "")
assert.EqualValues(t, c.RoleARN, "")
assert.EqualValues(t, c.Concurrency, 8)
assert.EqualValues(t, c.Logging.LogLevel, "prod")
assert.EqualValues(t, c.Logging.LogPath, "")
assert.EqualValues(t, *c.Logging.LogRotation, true)
assert.EqualValues(t, *c.NoVerifySSL, false)
assert.EqualValues(t, *c.LocalMode, false)
assert.EqualValues(t, c.ProxyAddress, "")
assert.EqualValues(t, c.Version, version1)
clearTestFile()
}
func TestLoadConfigVersion2(t *testing.T) {
configString :=
`Socket:
UDPAddress: "127.0.0.1:2000"
TCPAddress : "127.0.0.2:3000"
TotalBufferSizeMB: 16
Region: "us-east-1"
Endpoint: "https://xxxx.xxxx.com"
ResourceARN: ""
RoleARN: ""
Concurrency: 8
Logging:
LogRotation: true
LogPath: ""
LogLevel: "prod"
NoVerifySSL: false
LocalMode: false
ProxyAddress: ""
Version: 2`
setupTestFile(configString)
configLocations = append([]string{tstFilePath}, configLocations...)
c := LoadConfig("")
assert.EqualValues(t, c.Socket.UDPAddress, "127.0.0.1:2000")
assert.EqualValues(t, c.Socket.TCPAddress, "127.0.0.2:3000")
assert.EqualValues(t, c.TotalBufferSizeMB, 16)
assert.EqualValues(t, c.Region, "us-east-1")
assert.EqualValues(t, c.Endpoint, "https://xxxx.xxxx.com")
assert.EqualValues(t, c.ResourceARN, "")
assert.EqualValues(t, c.RoleARN, "")
assert.EqualValues(t, c.Concurrency, 8)
assert.EqualValues(t, c.Logging.LogLevel, "prod")
assert.EqualValues(t, c.Logging.LogPath, "")
assert.EqualValues(t, *c.Logging.LogRotation, true)
assert.EqualValues(t, *c.NoVerifySSL, false)
assert.EqualValues(t, *c.LocalMode, false)
assert.EqualValues(t, c.ProxyAddress, "")
assert.EqualValues(t, c.Version, version2)
clearTestFile()
}
func TestLoadConfigFileNotPresent(t *testing.T) {
configLocations = []string{"test_config_does_not_exist_989078070.yaml"}
c := LoadConfig("")
assert.NotNil(t, c)
// If no config file is present, the default config is returned
assert.EqualValues(t, DefaultConfig(), c)
}
func TestMergeUserConfigWithDefaultConfig(t *testing.T) {
configString :=
`Socket:
UDPAddress: "127.0.0.1:3000"
TotalBufferSizeMB: 8
Region: "us-east-2"
Endpoint: "https://xxxx.xxxx.com"
ResourceARN: ""
RoleARN: ""
Concurrency: 8
Logging:
LogRotation: false
Version: 2`
setupTestFile(configString)
c := merge(tstFilePath)
assert.EqualValues(t, c.Socket.UDPAddress, "127.0.0.1:3000")
assert.EqualValues(t, c.Socket.TCPAddress, "127.0.0.1:2000") // set to default value
assert.EqualValues(t, c.TotalBufferSizeMB, 8)
assert.EqualValues(t, c.Region, "us-east-2")
assert.EqualValues(t, c.Endpoint, "https://xxxx.xxxx.com")
assert.EqualValues(t, c.ResourceARN, "")
assert.EqualValues(t, c.RoleARN, "")
assert.EqualValues(t, c.Concurrency, 8)
assert.EqualValues(t, c.Logging.LogLevel, "prod")
assert.EqualValues(t, c.Logging.LogPath, "")
assert.EqualValues(t, *c.Logging.LogRotation, false)
assert.EqualValues(t, *c.NoVerifySSL, false)
assert.EqualValues(t, *c.LocalMode, false)
assert.EqualValues(t, c.ProxyAddress, "")
assert.EqualValues(t, c.Version, version2)
clearTestFile()
}
func TestConfigVersionNotSet(t *testing.T) {
setupTestCase()
configString :=
`Socket:
UDPAddress: "127.0.0.1:3000"
TotalBufferSizeMB: 8
Region: "us-east-2"
Endpoint: "https://xxxx.xxxx.com"
ResourceARN: ""
RoleARN: ""
Concurrency: 8`
goPath, err := setupTestFile(configString)
// Only run the failing part when a specific env variable is set
if os.Getenv("TEST_CONFIG_VERSION_NOT_SET") == "1" {
ConfigValidation(tstFilePath)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestConfigVersionNotSet")
cmd.Env = append(os.Environ(), "TEST_CONFIG_VERSION_NOT_SET=1")
if cmdErr := cmd.Start(); cmdErr != nil {
t.Fatal(cmdErr)
}
// Check that the program exited
error := cmd.Wait()
if e, ok := error.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", err)
}
// Check if the log message is what we expected
if _, logErr := os.Stat(goPath + "/" + errFile); os.IsNotExist(logErr) {
t.Fatal(logErr)
}
gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile)
if err != nil {
t.Fatal(err)
}
got := string(gotBytes)
expected := "Config Version is missing."
if !strings.Contains(got, expected) {
t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected)
}
clearTestFile()
tearTestCase()
}
func TestConfigUnsupportedVersionSet(t *testing.T) {
setupTestCase()
configString :=
`Socket:
UDPAddress: "127.0.0.1:3000"
TotalBufferSizeMB: 8
Region: "us-east-2"
Endpoint: "https://xxxx.xxxx.com"
ResourceARN: ""
RoleARN: ""
Concurrency: 8
Version: 10000`
goPath, err := setupTestFile(configString)
// Only run the failing part when a specific env variable is set
if os.Getenv("TEST_CONFIG_UNSUPPORTED_VERSION") == "1" {
merge(tstFilePath)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestConfigUnsupportedVersionSet")
cmd.Env = append(os.Environ(), "TEST_CONFIG_UNSUPPORTED_VERSION=1")
if cmdErr := cmd.Start(); cmdErr != nil {
t.Fatal(cmdErr)
}
// Check that the program exited
error := cmd.Wait()
if e, ok := error.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", err)
}
// Check if the log message is what we expected
if _, logErr := os.Stat(goPath + "/" + errFile); os.IsNotExist(logErr) {
t.Fatal(logErr)
}
gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile)
if err != nil {
t.Fatal(err)
}
got := string(gotBytes)
expected := "Config Version Setting is not correct."
if !strings.Contains(got, expected) {
t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected)
}
clearTestFile()
tearTestCase()
}
func TestUseMemoryLimitInConfig(t *testing.T) {
setupTestCase()
configString :=
`Socket:
UDPAddress: "127.0.0.1:3000"
MemoryLimit: 8
Region: "us-east-2"
Endpoint: "https://xxxx.xxxx.com"
ResourceARN: ""
RoleARN: ""
Concurrency: 8
Version: 2`
goPath, err := setupTestFile(configString)
// Only run the failing part when a specific env variable is set
if os.Getenv("TEST_USE_MEMORYLIMIT_FLAG") == "1" {
ConfigValidation(tstFilePath)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestUseMemoryLimitInConfig")
cmd.Env = append(os.Environ(), "TEST_USE_MEMORYLIMIT_FLAG=1")
if cmdErr := cmd.Start(); cmdErr != nil {
t.Fatal(cmdErr)
}
// Check that the program exited
error := cmd.Wait()
if e, ok := error.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", err)
}
// Check if the log message is what we expected
if _, logErr := os.Stat(goPath + "/" + errFile); os.IsNotExist(logErr) {
t.Fatal(logErr)
}
gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile)
if err != nil {
t.Fatal(err)
}
got := string(gotBytes)
expected := "MemoryLimit flag is not supported."
if !strings.Contains(got, expected) {
t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected)
}
clearTestFile()
tearTestCase()
}
func TestConfigValidationForNotSupportFlags(t *testing.T) {
setupTestCase()
configString :=
`Socket:
BufferSizeKB: 128
Version: 2`
goPath, err := setupTestFile(configString)
// Only run the failing part when a specific env variable is set
if os.Getenv("TEST_NOT_SUPPORT_FLAG") == "1" {
ConfigValidation(tstFilePath)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestConfigValidationForNotSupportFlags")
cmd.Env = append(os.Environ(), "TEST_NOT_SUPPORT_FLAG=1")
if cmdErr := cmd.Start(); cmdErr != nil {
t.Fatal(cmdErr)
}
// Check that the program exited
error := cmd.Wait()
if e, ok := error.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", err)
}
// Check if the log message is what we expected
if _, logErr := os.Stat(goPath + "/" + errFile); os.IsNotExist(logErr) {
t.Fatal(logErr)
}
gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile)
if err != nil {
t.Fatal(err)
}
got := string(gotBytes)
expected := "Socket.BufferSizeKB flag is not supported any more."
if !strings.Contains(got, expected) {
t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected)
}
clearTestFile()
tearTestCase()
}
func TestConfigValidationForNeedMigrationFlag(t *testing.T) {
setupTestCase()
configString :=
`Processor:
Region: ""
Version: 2`
goPath, err := setupTestFile(configString)
// Only run the failing part when a specific env variable is set
if os.Getenv("TEST_NEED_MIGRATION_FLAG") == "1" {
ConfigValidation(tstFilePath)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestConfigValidationForNeedMigrationFlag")
cmd.Env = append(os.Environ(), "TEST_NEED_MIGRATION_FLAG=1")
if cmdErr := cmd.Start(); cmdErr != nil {
t.Fatal(cmdErr)
}
// Check that the program exited
error := cmd.Wait()
if e, ok := error.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", err)
}
// Check if the log message is what we expected
if _, logErr := os.Stat(goPath + "/" + errFile); os.IsNotExist(logErr) {
t.Fatal(logErr)
}
gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile)
if err != nil {
t.Fatal(err)
}
got := string(gotBytes)
expected := "Processor.Region flag is not supported. Use X-Ray Daemon Config Migration Script to update the config file."
if !strings.Contains(got, expected) {
t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected)
}
clearTestFile()
tearTestCase()
}
func TestConfigValidationForInvalidFlag(t *testing.T) {
setupTestCase()
configString := `ABCDE: true
Version: 2`
goPath := os.Getenv("PWD")
if goPath == "" {
panic("GOPATH not set")
}
testFile := goPath + "/test_config.yaml"
f, err := os.Create(testFile)
if err != nil {
panic(err)
}
f.WriteString(configString)
f.Close()
// Only run the failing part when a specific env variable is set
if os.Getenv("TEST_INVALID_FLAG") == "1" {
ConfigValidation(testFile)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestConfigValidationForInvalidFlag")
cmd.Env = append(os.Environ(), "TEST_INVALID_FLAG=1")
if cmdErr := cmd.Start(); cmdErr != nil {
t.Fatal(cmdErr)
}
// Check that the program exited
error := cmd.Wait()
if e, ok := error.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", err)
}
// Check if the log message is what we expected
if _, logErr := os.Stat(goPath + "/" + errFile); os.IsNotExist(logErr) {
t.Fatal(logErr)
}
gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile)
if err != nil {
t.Fatal(err)
}
got := string(gotBytes)
expected := "ABCDE flag is invalid."
if !strings.Contains(got, expected) {
t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected)
}
os.Remove(testFile)
tearTestCase()
}
func TestValidConfigArray(t *testing.T) {
validString := []string{"TotalBufferSizeMB", "Concurrency", "Endpoint", "Region", "Socket.UDPAddress", "Socket.TCPAddress", "ProxyServer.IdleConnTimeout", "ProxyServer.MaxIdleConnsPerHost", "ProxyServer.MaxIdleConns", "Logging.LogRotation", "Logging.LogLevel", "Logging.LogPath", "LocalMode", "ResourceARN", "RoleARN", "NoVerifySSL", "ProxyAddress", "Version"}
testString := validConfigArray()
if len(validString) != len(testString) {
t.Fatalf("Unexpect test array length. Got %v but should be %v", len(testString), len(validString))
}
for i, v := range validString {
if !strings.EqualFold(v, testString[i]) {
t.Fatalf("Unexpect Flag in test array. Got %v but should be %v", testString[i], v)
}
}
}
func TestUserConfigArray(t *testing.T) {
configString :=
`Socket:
UDPAddress: "127.0.0.1:3000"
MemoryLimit: 8
Region: "us-east-2"
Endpoint: "https://xxxx.xxxx.com"
ResourceARN: ""
RoleARN: ""
Version: 2`
setupTestFile(configString)
validString := []string{"Socket.UDPAddress", "MemoryLimit", "Region", "Endpoint", "ResourceARN", "RoleARN", "Version"}
testString := userConfigArray(tstFilePath)
if len(validString) != len(testString) {
t.Fatalf("Unexpect test array length. Got %v but should be %v", len(testString), len(validString))
}
for i, v := range validString {
if v != testString[i] {
t.Fatalf("Unexpect Flag in test array. Got %v but should be %v", testString[i], v)
}
}
clearTestFile()
}
func TestErrorAndExitForGivenString(t *testing.T) {
setupTestCase()
// Only run the failing part when a specific env variable is set
if os.Getenv("TEST_STRING_ERROR") == "1" {
errorAndExit("error occurred", nil)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestErrorAndExitForGivenString")
cmd.Env = append(os.Environ(), "TEST_STRING_ERROR=1")
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
// Check that the program exited
error := cmd.Wait()
if e, ok := error.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", e)
}
// Check if the log message is what we expected
goPath := os.Getenv("PWD")
if goPath == "" {
panic("GOPATH not set")
}
if _, err := os.Stat(goPath + "/" + errFile); os.IsNotExist(err) {
t.Fatal(err)
}
gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile)
if err != nil {
t.Fatal(err)
}
got := string(gotBytes)
expected := "error occurred"
if !strings.Contains(got, expected) {
t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected)
}
tearTestCase()
}
func TestErrorAndExitForGivenError(t *testing.T) {
setupTestCase()
if os.Getenv("TEST_ERROR") == "1" {
err := errors.New("this is an error")
errorAndExit("", err)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestErrorAndExitForGivenError")
cmd.Env = append(os.Environ(), "TEST_ERROR=1")
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
// Check that the program exited
error := cmd.Wait()
if e, ok := error.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", e)
}
// Check if the log message is what we expected
goPath := os.Getenv("PWD")
if goPath == "" {
panic("GOPATH not set")
}
if _, err := os.Stat(goPath + "/" + errFile); os.IsNotExist(err) {
t.Fatal(err)
}
gotBytes, err := ioutil.ReadFile(goPath + "/" + errFile)
if err != nil {
t.Fatal(err)
}
got := string(gotBytes)
expected := "this is an error"
if !strings.Contains(got, expected) {
t.Fatalf("Unexpected log message. Got %s but should contain %s", got, expected)
}
tearTestCase()
}
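// Illustrative sketch (not part of the original source): the subprocess pattern the tests
// above rely on for code paths that call os.Exit. The helper name and parameters are
// hypothetical; the idea is to re-run the test binary with a marker environment variable
// and assert that the child process exits with a non-zero status.
func runExitingSubprocess(t *testing.T, testName string, envFlag string, run func()) {
	if os.Getenv(envFlag) == "1" {
		run() // expected to terminate the child process via os.Exit(1)
		return
	}
	cmd := exec.Command(os.Args[0], "-test.run="+testName)
	cmd.Env = append(os.Environ(), envFlag+"=1")
	if err := cmd.Run(); err == nil {
		t.Fatalf("Process ran without error, want a non-zero exit status")
	}
}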
| 728 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package cli
import (
"flag"
"fmt"
"os"
)
// Flag is used for cli parameters.
type Flag struct {
// A set of flags used for cli configuration.
fs *flag.FlagSet
// String array used to display flag information on cli.
cliStrings []string
}
// NewFlag returns a new flag with provided flag name.
func NewFlag(name string) *Flag {
flag := &Flag{
cliStrings: make([]string, 0, 19),
fs: flag.NewFlagSet(name, flag.ExitOnError),
}
return flag
}
// IntVarF defines 2 int flags for specified name and shortName with default value, and usage string.
// The argument ptr points to an int variable in which to store the value of the flag.
func (f *Flag) IntVarF(ptr *int, name string, shortName string, value int, usage string) {
f.fs.IntVar(ptr, name, value, usage)
f.fs.IntVar(ptr, shortName, value, usage)
s := fmt.Sprintf("\t-%v\t--%v\t%v", shortName, name, usage)
f.cliStrings = append(f.cliStrings, s)
}
// StringVarF defines 2 string flags for specified name and shortName, default value, and usage string.
// The argument ptr points to a string variable in which to store the value of the flag.
func (f *Flag) StringVarF(ptr *string, name string, shortName string, value string, usage string) {
f.fs.StringVar(ptr, name, value, usage)
f.fs.StringVar(ptr, shortName, value, usage)
var s string
if len(name) <= 4 {
s = fmt.Sprintf("\t-%v\t--%v\t\t%v", shortName, name, usage)
} else {
s = fmt.Sprintf("\t-%v\t--%v\t%v", shortName, name, usage)
}
f.cliStrings = append(f.cliStrings, s)
}
// BoolVarF defines 2 bool flags with specified name and shortName, default value, and usage string.
// The argument ptr points to a bool variable in which to store the value of the flag.
func (f *Flag) BoolVarF(ptr *bool, name string, shortName string, value bool, usage string) {
f.fs.BoolVar(ptr, name, value, usage)
f.fs.BoolVar(ptr, shortName, value, usage)
s := fmt.Sprintf("\t-%v\t--%v\t%v", shortName, name, usage)
f.cliStrings = append(f.cliStrings, s)
}
// Format function formats Flag f for cli display.
func (f *Flag) Format() []string {
var cliDisplay = make([]string, 0, 20)
s := fmt.Sprint("Usage: X-Ray [options]")
cliDisplay = append(cliDisplay, s)
for val := range f.cliStrings {
cliDisplay = append(cliDisplay, f.cliStrings[val])
}
s = fmt.Sprint("\t-h\t--help\t\tShow this screen")
cliDisplay = append(cliDisplay, s)
return cliDisplay
}
// ParseFlags parses flag definitions from the command line, which should not
// include the command name. It must be called after all flags in the FlagSet
// are defined and before flags are accessed by the program. Because the FlagSet
// uses flag.ExitOnError, -h or --help prints the usage text and exits the program.
func (f *Flag) ParseFlags() {
f.fs.Usage = func() {
display := f.Format()
for val := range display {
fmt.Println(display[val])
}
}
f.fs.Parse(os.Args[1:])
}
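// Illustrative sketch (not part of the original source): minimal wiring of the Flag wrapper
// above. The flag names, short names, and defaults here are hypothetical examples, not the
// daemon's real command-line options.
func exampleFlagUsage() {
	f := NewFlag("X-Ray Daemon")
	var bufferSize int
	var logLevel string
	var localMode bool
	f.IntVarF(&bufferSize, "buffer-memory", "m", 0, "Change the amount of memory in MB that buffers can use")
	f.StringVarF(&logLevel, "log-level", "l", "prod", "Log level, from most verbose to least: dev, debug, info, warn, error, prod")
	f.BoolVarF(&localMode, "local-mode", "o", false, "Don't check for EC2 instance metadata")
	f.ParseFlags() // -h/--help prints the Format() output and exits
	fmt.Printf("buffer=%d level=%s local=%v\n", bufferSize, logLevel, localMode)
}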
| 94 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package cli
import (
"math/rand"
"os"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
)
type CLIArgs struct {
StorageShort []string // store the shorthand flag
StorageLong []string // store the flag name
StorageUsage []string // store the flag usage
StorageFlagInt []int // store the flag int value
StorageFlagString []string // store the flag string value
StorageFlagBool []bool // store the flag bool value
}
// generate a random string of the given length
func RandStr(strSize int) string {
alphaNum := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
var bytes = make([]byte, strSize)
rand.Read(bytes)
for i, b := range bytes {
bytes[i] = alphaNum[b%byte(len(alphaNum))]
}
return string(bytes)
}
// copy the given slice of lengths into a new int slice
func InitialVar(paras []int) []int {
passLen := make([]int, 0, len(paras))
for i := 0; i < len(paras); i++ {
passLen = append(passLen, paras[i])
}
return passLen
}
// mock commandline input
func SetUpInputs(args []string, f *Flag) {
a := os.Args[1:]
if args != nil {
a = args
}
f.fs.Parse(a)
}
func (cli *CLIArgs) DefineFlagsArray(arrayLen int, strSize []int, strSizeFlag []int) *CLIArgs {
cli.StorageShort = make([]string, 0, arrayLen)
cli.StorageLong = make([]string, 0, arrayLen)
cli.StorageUsage = make([]string, 0, arrayLen)
cli.StorageFlagInt = make([]int, 0, arrayLen)
cli.StorageFlagString = make([]string, 0, arrayLen)
cli.StorageFlagBool = make([]bool, 0, arrayLen)
mShort := make(map[string]bool, arrayLen)
mLong := make(map[string]bool, arrayLen)
mUsage := make(map[string]bool, arrayLen)
for i := 0; i < len(strSize); i++ {
for j := 0; j < arrayLen; j++ {
if strSize[i] == strSizeFlag[0] {
for {
s := RandStr(strSize[i])
_, ok := mShort[s]
if !ok {
mShort[s] = true
break
}
}
}
if strSize[i] == strSizeFlag[1] {
for {
s := RandStr(strSize[i])
_, ok := mLong[s]
if !ok {
mLong[s] = true
break
}
}
}
if strSize[i] == strSizeFlag[2] {
for {
s := RandStr(strSize[i])
_, ok := mUsage[s]
if !ok {
mUsage[s] = true
break
}
}
}
}
}
for k := range mShort {
cli.StorageShort = append(cli.StorageShort, k)
}
for k := range mLong {
cli.StorageLong = append(cli.StorageLong, k)
}
for k := range mUsage {
cli.StorageUsage = append(cli.StorageUsage, k)
}
for i := 0; i < arrayLen; i++ {
cli.StorageFlagInt = append(cli.StorageFlagInt, 0)
}
for i := 0; i < arrayLen; i++ {
cli.StorageFlagString = append(cli.StorageFlagString, "&")
}
for i := 0; i < arrayLen; i++ {
cli.StorageFlagBool = append(cli.StorageFlagBool, true)
}
return cli
}
func (cli *CLIArgs) InitialFlags(f *Flag) *CLIArgs {
for i := 0; i < 10; i++ {
f.IntVarF(&cli.StorageFlagInt[i], cli.StorageLong[i], cli.StorageShort[i], -1, cli.StorageUsage[i])
}
for i := 10; i < 20; i++ {
f.StringVarF(&cli.StorageFlagString[i-10], cli.StorageLong[i], cli.StorageShort[i], "*", cli.StorageUsage[i])
}
for i := 20; i < 30; i++ {
f.BoolVarF(&cli.StorageFlagBool[i-20], cli.StorageLong[i], cli.StorageShort[i], false, cli.StorageUsage[i])
}
return cli
}
func TestSettingsFromFlags(t *testing.T) {
f := NewFlag("Test Flag")
paras := []int{1, 5, 10} // generate the random string, the length are 1, 5, 10
varSize := InitialVar(paras)
c := CLIArgs{}
cli := c.DefineFlagsArray(30, paras, varSize)
cli = c.InitialFlags(f)
var num [10]string
var str [10]string
var bo [10]string
input := make([]string, 0, 60)
inputFlags := make([]string, 0, 30)
inputFlagsValue := make([]string, 0, 30)
// generate the commandline input
for i := 0; i < 10; i++ {
num[i] = strconv.Itoa(rand.Intn(100))
str[i] = RandStr(rand.Intn(5) + 1)
bo[i] = strconv.FormatBool(true)
}
for i := 0; i < 30; i++ {
if i < 10 {
marked := "-" + cli.StorageShort[i]
input = append(input, marked)
inputFlags = append(inputFlags, marked)
input = append(input, num[i])
inputFlagsValue = append(inputFlagsValue, num[i])
}
if i >= 10 && i < 20 {
marked := "-" + cli.StorageShort[i]
input = append(input, marked)
inputFlags = append(inputFlags, marked)
input = append(input, str[i-10])
inputFlagsValue = append(inputFlagsValue, str[i-10])
}
if i >= 20 && i < 30 {
inputFlags = append(inputFlags, "-"+cli.StorageShort[i])
marked := "-" + cli.StorageShort[i] + "=" + bo[i-20]
input = append(input, marked)
inputFlagsValue = append(inputFlagsValue, bo[i-20])
}
}
// test the default value
SetUpInputs([]string{""}, f)
for i := 0; i < 30; i++ {
if i < 10 {
assert.Equal(t, -1, cli.StorageFlagInt[i], "Failed to get the default value")
}
if i >= 10 && i < 20 {
assert.Equal(t, "*", cli.StorageFlagString[i-10], "Failed to get the default value")
}
if i >= 20 && i < 30 {
assert.Equal(t, false, cli.StorageFlagBool[i-20], "Failed to get the default value")
}
}
// test commandline parse value
SetUpInputs(input, f)
for i := 0; i < 30; i++ {
if i < 10 {
assert.Equal(t, inputFlagsValue[i], strconv.Itoa(cli.StorageFlagInt[i]), "Failed to parse the value")
}
if i >= 10 && i < 20 {
assert.Equal(t, inputFlagsValue[i], cli.StorageFlagString[i-10], "Failed to parse the value")
}
if i >= 20 && i < 30 {
assert.Equal(t, inputFlagsValue[i], strconv.FormatBool(cli.StorageFlagBool[i-20]), "Failed to parse the value")
}
}
// test flag usage
for i := 0; i < 30; i++ {
assert.Equal(t, cli.StorageUsage[i], f.fs.Lookup(cli.StorageShort[i]).Usage, "Failed to give the usage of the flag")
}
// test the display of usage
s := f.Format()
for i := 0; i < 30; i++ {
assert.Equal(t, f.cliStrings[i], s[i+1], "Failed to match the format")
}
}
| 224 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package conn
import (
"crypto/tls"
"net/http"
"net/url"
"os"
"time"
"encoding/json"
"io/ioutil"
"strings"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-xray-daemon/pkg/cfg"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
log "github.com/cihub/seelog"
"golang.org/x/net/http2"
)
type connAttr interface {
newAWSSession(roleArn string, region string) *session.Session
getEC2Region(s *session.Session) (string, error)
}
// Conn implements connAttr interface.
type Conn struct{}
func (c *Conn) getEC2Region(s *session.Session) (string, error) {
return ec2metadata.New(s).Region()
}
const (
STSEndpointPrefix = "https://sts."
STSEndpointSuffix = ".amazonaws.com"
STSAwsCnPartitionIDSuffix = ".amazonaws.com.cn" // AWS China partition.
)
// getNewHTTPClient returns new HTTP client instance with provided configuration.
func getNewHTTPClient(maxIdle int, requestTimeout int, noVerify bool, proxyAddress string) *http.Client {
log.Debugf("Using proxy address: %v", proxyAddress)
tls := &tls.Config{
InsecureSkipVerify: noVerify,
}
finalProxyAddress := getProxyAddress(proxyAddress)
proxyURL := getProxyURL(finalProxyAddress)
transport := &http.Transport{
MaxIdleConnsPerHost: maxIdle,
TLSClientConfig: tls,
Proxy: http.ProxyURL(proxyURL),
}
// HTTP/2 is not enabled by default here because we configure a custom TLSClientConfig (for SSL to the data plane),
// so http2.ConfigureTransport is called explicitly to set up the transport layer to use HTTP/2.
http2.ConfigureTransport(transport)
http := &http.Client{
Transport: transport,
Timeout: time.Second * time.Duration(requestTimeout),
}
return http
}
func getProxyAddress(proxyAddress string) string {
var finalProxyAddress string
if proxyAddress != "" {
finalProxyAddress = proxyAddress
} else if proxyAddress == "" && os.Getenv("HTTPS_PROXY") != "" {
finalProxyAddress = os.Getenv("HTTPS_PROXY")
} else {
finalProxyAddress = ""
}
return finalProxyAddress
}
func getProxyURL(finalProxyAddress string) *url.URL {
var proxyURL *url.URL
var err error
if finalProxyAddress != "" {
proxyURL, err = url.Parse(finalProxyAddress)
if err != nil {
log.Errorf("Bad proxy URL: %v", err)
os.Exit(1)
}
} else {
proxyURL = nil
}
return proxyURL
}
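// Illustrative sketch (not part of the original source): how the two helpers above combine.
// The function name is hypothetical. An explicitly configured proxy address takes precedence
// over the HTTPS_PROXY environment variable, and an empty address means no proxy at all.
func resolveProxy(configured string) *url.URL {
	addr := getProxyAddress(configured) // "" falls back to HTTPS_PROXY, then to no proxy
	return getProxyURL(addr)            // nil when addr is ""
}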
func getRegionFromECSMetadata() string {
var ecsMetadataEnabled string
var metadataFilePath string
var metadataFile []byte
var dat map[string]interface{}
var taskArn []string
var err error
var region string
region = ""
ecsMetadataEnabled = os.Getenv("ECS_ENABLE_CONTAINER_METADATA")
ecsMetadataEnabled = strings.ToLower(ecsMetadataEnabled)
if ecsMetadataEnabled == "true" {
metadataFilePath = os.Getenv("ECS_CONTAINER_METADATA_FILE")
metadataFile, err = ioutil.ReadFile(metadataFilePath)
if err != nil {
log.Errorf("Unable to open ECS metadata file: %v\n", err)
} else {
if err := json.Unmarshal(metadataFile, &dat); err != nil {
log.Errorf("Unable to read ECS metadata file contents: %v", err)
} else {
taskArn = strings.Split(dat["TaskARN"].(string), ":")
region = taskArn[3]
log.Debugf("Fetch region %v from ECS metadata file", region)
}
}
}
return region
}
// GetAWSConfigSession returns AWS config and session instances.
func GetAWSConfigSession(cn connAttr, c *cfg.Config, roleArn string, region string, noMetadata bool) (*aws.Config, *session.Session) {
var s *session.Session
var err error
var awsRegion string
http := getNewHTTPClient(cfg.ParameterConfigValue.Processor.MaxIdleConnPerHost, cfg.ParameterConfigValue.Processor.RequestTimeout, *c.NoVerifySSL, c.ProxyAddress)
regionEnv := os.Getenv("AWS_REGION")
if region == "" && regionEnv != "" {
awsRegion = regionEnv
log.Debugf("Fetch region %v from environment variables", awsRegion)
} else if region != "" {
awsRegion = region
log.Debugf("Fetch region %v from commandline/config file", awsRegion)
} else if !noMetadata {
awsRegion = getRegionFromECSMetadata()
if awsRegion == "" {
es := getDefaultSession()
awsRegion, err = cn.getEC2Region(es)
if err != nil {
log.Errorf("Unable to fetch region from EC2 metadata: %v\n", err)
} else {
log.Debugf("Fetch region %v from ec2 metadata", awsRegion)
}
}
} else {
es := getDefaultSession()
awsRegion = *es.Config.Region
log.Debugf("Fetched region %v from session config", awsRegion)
}
if awsRegion == "" {
log.Errorf("Cannot fetch region variable from config file, environment variables, ecs metadata, or ec2 metadata. Use local-mode to use the local session region.")
os.Exit(1)
}
s = cn.newAWSSession(roleArn, awsRegion)
config := &aws.Config{
Region: aws.String(awsRegion),
DisableParamValidation: aws.Bool(true),
MaxRetries: aws.Int(2),
Endpoint: aws.String(c.Endpoint),
HTTPClient: http,
}
return config, s
}
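// Illustrative sketch (not part of the original source): obtaining the AWS config and session
// with the concrete Conn type. The region value is a placeholder; passing a non-empty region
// together with noMetadata=true skips the ECS and EC2 metadata lookups above.
func exampleGetAWSConfigSession() (*aws.Config, *session.Session) {
	c := cfg.DefaultConfig()
	return GetAWSConfigSession(&Conn{}, c, "", "us-west-2", true)
}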
// ProxyServerTransport configures HTTP transport for TCP Proxy Server.
func ProxyServerTransport(config *cfg.Config) *http.Transport {
tls := &tls.Config{
InsecureSkipVerify: *config.NoVerifySSL,
}
proxyAddr := getProxyAddress(config.ProxyAddress)
proxyURL := getProxyURL(proxyAddr)
// Connection timeout in seconds
idleConnTimeout := time.Duration(config.ProxyServer.IdleConnTimeout) * time.Second
transport := &http.Transport{
MaxIdleConns: config.ProxyServer.MaxIdleConns,
MaxIdleConnsPerHost: config.ProxyServer.MaxIdleConnsPerHost,
IdleConnTimeout: idleConnTimeout,
Proxy: http.ProxyURL(proxyURL),
TLSClientConfig: tls,
// If not disabled the transport will add a gzip encoding header
// to requests with no `accept-encoding` header value. The header
// is added after we sign the request which invalidates the
// signature.
DisableCompression: true,
}
return transport
}
func (c *Conn) newAWSSession(roleArn string, region string) *session.Session {
var s *session.Session
var err error
if roleArn == "" {
s = getDefaultSession()
} else {
stsCreds := getSTSCreds(region, roleArn)
s, err = session.NewSession(&aws.Config{
Credentials: stsCreds,
})
if err != nil {
log.Errorf("Error in creating session object : %v\n.", err)
os.Exit(1)
}
}
return s
}
// getSTSCreds gets STS credentials from regional endpoint. ErrCodeRegionDisabledException is received if the
// STS regional endpoint is disabled. In this case STS credentials are fetched from STS primary regional endpoint
// in the respective AWS partition.
func getSTSCreds(region string, roleArn string) *credentials.Credentials {
t := getDefaultSession()
stsCred := getSTSCredsFromRegionEndpoint(t, region, roleArn)
// Make explicit call to fetch credentials.
_, err := stsCred.Get()
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case sts.ErrCodeRegionDisabledException:
log.Errorf("Region : %v - %v", region, aerr.Error())
log.Info("Credentials for provided RoleARN will be fetched from STS primary region endpoint instead of regional endpoint.")
stsCred = getSTSCredsFromPrimaryRegionEndpoint(t, roleArn, region)
}
}
}
return stsCred
}
// getSTSCredsFromRegionEndpoint fetches STS credentials for provided roleARN from regional endpoint.
// AWS STS recommends that you provide both the Region and endpoint when you make calls to a Regional endpoint.
// Reference: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code
func getSTSCredsFromRegionEndpoint(sess *session.Session, region string, roleArn string) *credentials.Credentials {
regionalEndpoint := getSTSRegionalEndpoint(region)
// If regionalEndpoint is "", the SDK falls back to the global STS endpoint for classic regions;
// opt-in regions such as ap-east-1 (HKG) still resolve to an STS regional endpoint via the region value.
// This only happens when the provided region is not present in aws_regions.go.
c := &aws.Config{Region: aws.String(region), Endpoint: &regionalEndpoint}
st := sts.New(sess, c)
log.Infof("STS Endpoint : %v", st.Endpoint)
return stscreds.NewCredentialsWithClient(st, roleArn)
}
// getSTSCredsFromPrimaryRegionEndpoint fetches STS credentials for provided roleARN from primary region endpoint in the
// respective partition.
func getSTSCredsFromPrimaryRegionEndpoint(t *session.Session, roleArn string, region string) *credentials.Credentials {
partitionId := getPartition(region)
if partitionId == endpoints.AwsPartitionID {
return getSTSCredsFromRegionEndpoint(t, endpoints.UsEast1RegionID, roleArn)
} else if partitionId == endpoints.AwsCnPartitionID {
return getSTSCredsFromRegionEndpoint(t, endpoints.CnNorth1RegionID, roleArn)
} else if partitionId == endpoints.AwsUsGovPartitionID {
return getSTSCredsFromRegionEndpoint(t, endpoints.UsGovWest1RegionID, roleArn)
}
return nil
}
func getSTSRegionalEndpoint(r string) string {
p := getPartition(r)
var e string
if p == endpoints.AwsPartitionID || p == endpoints.AwsUsGovPartitionID {
e = STSEndpointPrefix + r + STSEndpointSuffix
} else if p == endpoints.AwsCnPartitionID {
e = STSEndpointPrefix + r + STSAwsCnPartitionIDSuffix
}
return e
}
func getDefaultSession() *session.Session {
result, serr := session.NewSessionWithOptions(session.Options{SharedConfigState: session.SharedConfigEnable})
if serr != nil {
log.Errorf("Error in creating session object : %v\n.", serr)
os.Exit(1)
}
return result
}
// getPartition return AWS Partition for the provided region.
func getPartition(region string) string {
p, _ := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), region)
return p.ID()
}
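// Illustrative sketch (not part of the original source): how the partition lookup above drives
// the STS endpoint produced by getSTSRegionalEndpoint. The function name and sample regions
// are illustrative only.
func logSamplePartitionEndpoints() {
	for _, r := range []string{"us-east-1", "cn-north-1", "us-gov-east-1"} {
		// e.g. partition "aws" -> "https://sts.us-east-1.amazonaws.com",
		// partition "aws-cn" -> "https://sts.cn-north-1.amazonaws.com.cn"
		log.Debugf("region %v partition %v STS endpoint %v", r, getPartition(r), getSTSRegionalEndpoint(r))
	}
}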
| 307 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package conn
import (
"errors"
"fmt"
"os"
"os/exec"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-xray-daemon/pkg/util/test"
"github.com/stretchr/testify/mock"
"github.com/aws/aws-xray-daemon/pkg/cfg"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/stretchr/testify/assert"
)
var ec2Region = "us-east-1"
var tstFileName = "test_config.json"
var tstFilePath string
type mockConn struct {
mock.Mock
sn *session.Session
}
func setupTestFile(cnfg string) (string, error) {
goPath := os.Getenv("PWD")
if goPath == "" {
panic("GOPATH not set")
}
tstFilePath = goPath + "/" + tstFileName
f, err := os.Create(tstFilePath)
if err != nil {
panic(err)
}
f.WriteString(cnfg)
f.Close()
return goPath, err
}
func clearTestFile() {
os.Remove(tstFilePath)
}
func (c *mockConn) getEC2Region(s *session.Session) (string, error) {
args := c.Called(nil)
errorStr := args.String(0)
var err error
if errorStr != "" {
err = errors.New(errorStr)
return "", err
}
return ec2Region, nil
}
func (c *mockConn) newAWSSession(roleArn string, region string) *session.Session {
return c.sn
}
// fetch region value from ec2 meta data service
func TestEC2Session(t *testing.T) {
m := new(mockConn)
log := test.LogSetup()
m.On("getEC2Region", nil).Return("").Once()
var expectedSession *session.Session
roleARN := ""
expectedSession, _ = session.NewSession()
m.sn = expectedSession
cfg, s := GetAWSConfigSession(m, cfg.DefaultConfig(), roleARN, "", false)
assert.Equal(t, s, expectedSession, "Expect the session object is not overridden")
assert.Equal(t, *cfg.Region, ec2Region, "Region value fetched from ec2-metadata service")
fmt.Printf("Logs: %v", log.Logs)
assert.True(t, strings.Contains(log.Logs[1], fmt.Sprintf("Fetch region %v from ec2 metadata", ec2Region)))
}
// fetch region value from environment variable
func TestRegionEnv(t *testing.T) {
log := test.LogSetup()
region := "us-west-2"
env := stashEnv()
defer popEnv(env)
os.Setenv("AWS_REGION", region)
var m = &mockConn{}
var expectedSession *session.Session
roleARN := ""
expectedSession, _ = session.NewSession()
m.sn = expectedSession
cfg, s := GetAWSConfigSession(m, cfg.DefaultConfig(), roleARN, "", true)
assert.Equal(t, s, expectedSession, "Expect the session object is not overridden")
assert.Equal(t, *cfg.Region, region, "Region value fetched from environment")
assert.True(t, strings.Contains(log.Logs[1], fmt.Sprintf("Fetch region %v from environment variables", region)))
}
// Get region from the command line or config file
func TestRegionArgument(t *testing.T) {
log := test.LogSetup()
region := "ap-northeast-1"
var m = &mockConn{}
var expectedSession *session.Session
roleARN := ""
expectedSession, _ = session.NewSession()
m.sn = expectedSession
cfg, s := GetAWSConfigSession(m, cfg.DefaultConfig(), roleARN, region, true)
assert.Equal(t, s, expectedSession, "Expect the session object is not overridden")
assert.Equal(t, *cfg.Region, region, "Region value fetched from the environment")
assert.True(t, strings.Contains(log.Logs[1], fmt.Sprintf("Fetch region %v from commandline/config file", region)))
}
// exit function if no region value found
func TestNoRegion(t *testing.T) {
region := ""
envFlag := "NO_REGION"
var m = &mockConn{}
var expectedSession *session.Session
roleARN := ""
expectedSession, _ = session.NewSession()
m.sn = expectedSession
if os.Getenv(envFlag) == "1" {
GetAWSConfigSession(m, cfg.DefaultConfig(), roleARN, region, true) // exits because no region found
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestNoRegion")
cmd.Env = append(os.Environ(), envFlag+"=1")
if cmdErr := cmd.Start(); cmdErr != nil {
t.Fatal(cmdErr)
}
// Check that the program exited
error := cmd.Wait()
if e, ok := error.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", error)
}
}
// getRegionFromECSMetadata() returns a valid region from an appropriate JSON file
func TestValidECSRegion(t *testing.T) {
metadataFile :=
`{
"Cluster": "default",
"ContainerInstanceARN": "arn:aws:ecs:us-east-1:012345678910:container-instance/default/1f73d099-b914-411c-a9ff-81633b7741dd",
"TaskARN": "arn:aws:ecs:us-east-1:012345678910:task/default/2b88376d-aba3-4950-9ddf-bcb0f388a40c",
"TaskDefinitionFamily": "console-sample-app-static",
"TaskDefinitionRevision": "1",
"ContainerID": "aec2557997f4eed9b280c2efd7afccdcedfda4ac399f7480cae870cfc7e163fd",
"ContainerName": "simple-app",
"DockerContainerName": "/ecs-console-sample-app-static-1-simple-app-e4e8e495e8baa5de1a00",
"ImageID": "sha256:2ae34abc2ed0a22e280d17e13f9c01aaf725688b09b7a1525d1a2750e2c0d1de",
"ImageName": "httpd:2.4",
"PortMappings": [
{
"ContainerPort": 80,
"HostPort": 80,
"BindIp": "0.0.0.0",
"Protocol": "tcp"
}
],
"Networks": [
{
"NetworkMode": "bridge",
"IPv4Addresses": [
"192.0.2.0"
]
}
],
"MetadataFileStatus": "READY",
"AvailabilityZone": "us-east-1b",
"HostPrivateIPv4Address": "192.0.2.0",
"HostPublicIPv4Address": "203.0.113.0"
}`
setupTestFile(metadataFile)
env := stashEnv()
defer popEnv(env)
os.Setenv("ECS_ENABLE_CONTAINER_METADATA", "true")
os.Setenv("ECS_CONTAINER_METADATA_FILE", tstFilePath)
testString := getRegionFromECSMetadata()
assert.EqualValues(t, "us-east-1", testString)
clearTestFile()
os.Clearenv()
}
// getRegionFromECSMetadata() returns an empty string if the ECS metadata environment variables are not set
func TestNoECSMetadata(t *testing.T){
env := stashEnv()
defer popEnv(env)
testString := getRegionFromECSMetadata()
assert.EqualValues(t, "", testString)
}
// getRegionFromECSMetadata() logs an error and returns an empty string when the ECS metadata file cannot be parsed as valid JSON
func TestInvalidECSMetadata(t *testing.T){
metadataFile := "][foobar})("
setupTestFile(metadataFile)
env := stashEnv()
defer popEnv(env)
os.Setenv("ECS_ENABLE_CONTAINER_METADATA", "true")
os.Setenv("ECS_CONTAINER_METADATA_FILE", tstFilePath)
log := test.LogSetup()
testString := getRegionFromECSMetadata()
assert.EqualValues(t, "", testString)
assert.True(t, strings.Contains(log.Logs[0], "Unable to read"))
clearTestFile()
}
// getRegionFromECSMetadata() logs an error and returns an empty string when the ECS metadata file cannot be opened
func TestMissingECSMetadataFile(t *testing.T){
metadataFile := "foobar"
setupTestFile(metadataFile)
env := stashEnv()
defer popEnv(env)
clearTestFile()
os.Setenv("ECS_ENABLE_CONTAINER_METADATA", "true")
os.Setenv("ECS_CONTAINER_METADATA_FILE", metadataFile)
log := test.LogSetup()
testString := getRegionFromECSMetadata()
assert.EqualValues(t, "", testString)
assert.True(t, strings.Contains(log.Logs[0], "Unable to open"))
}
// getEC2Region() returns nil region and error, resulting in exiting the process
func TestErrEC2(t *testing.T) {
m := new(mockConn)
m.On("getEC2Region", nil).Return("Error").Once()
var expectedSession *session.Session
roleARN := ""
expectedSession, _ = session.NewSession()
m.sn = expectedSession
envFlag := "NO_REGION"
if os.Getenv(envFlag) == "1" {
GetAWSConfigSession(m, cfg.DefaultConfig(), roleARN, "", false)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestErrEC2")
cmd.Env = append(os.Environ(), envFlag+"=1")
if cmdErr := cmd.Start(); cmdErr != nil {
t.Fatal(cmdErr)
}
// Check that the program exited
error := cmd.Wait()
if e, ok := error.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", error)
}
}
func TestLoadEnvConfigCreds(t *testing.T) {
env := stashEnv()
defer popEnv(env)
cases := struct {
Env map[string]string
Val credentials.Value
}{
Env: map[string]string{
"AWS_ACCESS_KEY": "AKID",
"AWS_SECRET_KEY": "SECRET",
"AWS_SESSION_TOKEN": "TOKEN",
},
Val: credentials.Value{
AccessKeyID: "AKID", SecretAccessKey: "SECRET", SessionToken: "TOKEN",
ProviderName: "EnvConfigCredentials",
},
}
for k, v := range cases.Env {
os.Setenv(k, v)
}
c := &Conn{}
cfg := c.newAWSSession("", "")
value, err := cfg.Config.Credentials.Get()
assert.Nil(t, err, "Expect no error")
assert.Equal(t, cases.Val, value, "Expect the credentials value to match")
cfgA := c.newAWSSession("ROLEARN", "TEST")
valueA, _ := cfgA.Config.Credentials.Get()
assert.Equal(t, "", valueA.AccessKeyID, "Expect the value to be empty")
assert.Equal(t, "", valueA.SecretAccessKey, "Expect the value to be empty")
assert.Equal(t, "", valueA.SessionToken, "Expect the value to be empty")
assert.Equal(t, "AssumeRoleProvider", valueA.ProviderName, "Expect the value to be AssumeRoleProvider")
}
func TestGetProxyUrlProxyAddressNotValid(t *testing.T) {
errorAddress := [3]string{"http://[%10::1]", "http://%41:8080/", "http://a b.com/"}
for _, address := range errorAddress {
// Only run the failing part when a specific env variable is set
if os.Getenv("Test_PROXY_URL") == "1" {
getProxyURL(address)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestGetProxyUrlProxyAddressNotValid")
cmd.Env = append(os.Environ(), "Test_PROXY_URL=1")
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
// Check that the program exited
err := cmd.Wait()
if e, ok := err.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", err)
}
}
}
func TestGetProxyAddressFromEnvVariable(t *testing.T) {
env := stashEnv()
defer popEnv(env)
os.Setenv("HTTPS_PROXY", "https://127.0.0.1:8888")
assert.Equal(t, os.Getenv("HTTPS_PROXY"), getProxyAddress(""), "Expect function return value should be same with Environment value")
}
func TestGetProxyAddressFromConfigFile(t *testing.T) {
env := stashEnv()
defer popEnv(env)
assert.Equal(t, "https://127.0.0.1:8888", getProxyAddress("https://127.0.0.1:8888"), "Expect function return value should be same with input value")
}
func TestGetProxyAddressWhenNotExist(t *testing.T) {
env := stashEnv()
defer popEnv(env)
assert.Equal(t, "", getProxyAddress(""), "Expect function return value to be empty")
}
func TestGetProxyAddressPriority(t *testing.T) {
env := stashEnv()
defer popEnv(env)
os.Setenv("HTTPS_PROXY", "https://127.0.0.1:8888")
assert.Equal(t, "https://127.0.0.1:9999", getProxyAddress("https://127.0.0.1:9999"), "Expect function return value to be same with input")
}
func TestGetPartition1(t *testing.T) {
r := "us-east-1"
p := getPartition(r)
assert.Equal(t, endpoints.AwsPartitionID, p)
}
func TestGetPartition2(t *testing.T) {
r := "cn-north-1"
p := getPartition(r)
assert.Equal(t, endpoints.AwsCnPartitionID, p)
}
func TestGetPartition3(t *testing.T) {
r := "us-gov-east-1"
p := getPartition(r)
assert.Equal(t, endpoints.AwsUsGovPartitionID, p)
}
func TestGetPartition4(t *testing.T) { // if a region is not present in the array
r := "XYZ"
p := getPartition(r)
assert.Equal(t, "", p)
}
func TestGetSTSRegionalEndpoint1(t *testing.T) {
r := "us-east-1"
p := getSTSRegionalEndpoint(r)
assert.Equal(t, "https://sts.us-east-1.amazonaws.com", p)
}
func TestGetSTSRegionalEndpoint2(t *testing.T) {
r := "cn-north-1"
p := getSTSRegionalEndpoint(r)
assert.Equal(t, "https://sts.cn-north-1.amazonaws.com.cn", p)
}
func TestGetSTSRegionalEndpoint3(t *testing.T) {
r := "us-gov-east-1"
p := getSTSRegionalEndpoint(r)
assert.Equal(t, "https://sts.us-gov-east-1.amazonaws.com", p)
}
func TestGetSTSRegionalEndpoint4(t *testing.T) { // if a region is not present in the array
r := "XYZ"
p := getSTSRegionalEndpoint(r)
assert.Equal(t, "", p)
}
func stashEnv() []string {
env := os.Environ()
os.Clearenv()
return env
}
func popEnv(env []string) {
os.Clearenv()
for _, e := range env {
p := strings.SplitN(e, "=", 2)
os.Setenv(p[0], p[1])
}
}
| 426 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package conn
import (
"os"
"runtime"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/xray"
"github.com/aws/aws-xray-daemon/pkg/cfg"
log "github.com/cihub/seelog"
)
// Constant prefixes used to identify information in user-agent
const agentPrefix = "xray-agent/xray-daemon/"
const execEnvPrefix = " exec-env/"
const osPrefix = " OS/"
// XRay defines X-Ray api call structure.
type XRay interface {
PutTraceSegments(input *xray.PutTraceSegmentsInput) (*xray.PutTraceSegmentsOutput, error)
PutTelemetryRecords(input *xray.PutTelemetryRecordsInput) (*xray.PutTelemetryRecordsOutput, error)
}
// XRayClient represents X-Ray client.
type XRayClient struct {
xRay *xray.XRay
}
// PutTraceSegments makes PutTraceSegments api call on X-Ray client.
func (c *XRayClient) PutTraceSegments(input *xray.PutTraceSegmentsInput) (*xray.PutTraceSegmentsOutput, error) {
return c.xRay.PutTraceSegments(input)
}
// PutTelemetryRecords makes PutTelemetryRecords api call on X-Ray client.
func (c *XRayClient) PutTelemetryRecords(input *xray.PutTelemetryRecordsInput) (*xray.PutTelemetryRecordsOutput, error) {
return c.xRay.PutTelemetryRecords(input)
}
// NewXRay creates a new instance of the XRay client with an AWS configuration and session.
func NewXRay(awsConfig *aws.Config, s *session.Session) XRay {
x := xray.New(s, awsConfig)
log.Debugf("Using Endpoint: %s", x.Endpoint)
execEnv := os.Getenv("AWS_EXECUTION_ENV")
if execEnv == "" {
execEnv = "UNKNOWN"
}
osInformation := runtime.GOOS + "-" + runtime.GOARCH
x.Handlers.Build.PushBackNamed(request.NamedHandler{
Name: "tracing.XRayVersionUserAgentHandler",
Fn: request.MakeAddToUserAgentFreeFormHandler(agentPrefix + cfg.Version + execEnvPrefix + execEnv + osPrefix + osInformation),
})
x.Handlers.Sign.PushFrontNamed(request.NamedHandler{
Name: "tracing.TimestampHandler",
Fn: func(r *request.Request) {
r.HTTPRequest.Header.Set("X-Amzn-Xray-Timestamp", strconv.FormatFloat(float64(time.Now().UnixNano())/float64(time.Second), 'f', 9, 64))
},
})
return &XRayClient{
xRay: x,
}
}
// IsTimeoutError checks whether error is timeout error.
func IsTimeoutError(err error) bool {
awsError, ok := err.(awserr.Error)
if ok {
if strings.Contains(awsError.Error(), "net/http: request canceled") {
return true
}
}
return false
}
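// Illustrative sketch (not part of the original source): wiring NewXRay to a session and
// config. The hard-coded region is a placeholder; in the daemon both values come from
// GetAWSConfigSession in this package.
func exampleNewXRayClient() XRay {
	s, err := session.NewSession()
	if err != nil {
		log.Errorf("Error in creating session object : %v", err)
		return nil
	}
	return NewXRay(&aws.Config{Region: aws.String("us-east-1")}, s)
}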
| 93 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package logger
import (
"bytes"
"fmt"
"testing"
"time"
"github.com/aws/aws-xray-daemon/pkg/cfg"
"github.com/aws/aws-xray-daemon/pkg/util"
"github.com/cihub/seelog"
"github.com/stretchr/testify/assert"
)
type TestCase struct {
Level seelog.LogLevel
Message string
Params []interface{}
Output string
}
func generateTestCase(t *testing.T, level seelog.LogLevel, formatID string, message string, params ...interface{}) TestCase {
testCase := TestCase{
Level: level,
Message: message,
Params: params,
}
var levelStr string
switch level {
case seelog.ErrorLvl:
levelStr = "Error"
case seelog.InfoLvl:
levelStr = "Info"
case seelog.DebugLvl:
levelStr = "Debug"
case seelog.WarnLvl:
levelStr = "Warn"
case seelog.TraceLvl:
levelStr = "Trace"
case seelog.CriticalLvl:
levelStr = "Critical"
default:
assert.Fail(t, "Unexpected log level", level)
}
msg := fmt.Sprintf(testCase.Message, testCase.Params...)
testCase.Output = fmt.Sprintf("%s [%v] %v\n", time.Now().Format(formatID), levelStr, msg)
return testCase
}
func TestLogger(t *testing.T) {
var testCases []TestCase
formatID := "2006-01-02T15:04:05Z07:00"
for _, logLevel := range []seelog.LogLevel{seelog.DebugLvl, seelog.InfoLvl, seelog.ErrorLvl, seelog.WarnLvl, seelog.TraceLvl, seelog.CriticalLvl} {
testCases = append(testCases, generateTestCase(t, logLevel, formatID, "(some message without parameters)"))
testCases = append(testCases, generateTestCase(t, logLevel, formatID, "(some message with %v as param)", []interface{}{"|a param|"}))
}
for _, testCase := range testCases {
testLogger(t, testCase)
}
}
func testLogger(t *testing.T, testCase TestCase) {
// create seelog logger that outputs to buffer
var out bytes.Buffer
config := &cfg.Config{
Logging: struct {
LogRotation *bool `yaml:"LogRotation"`
LogLevel string `yaml:"LogLevel"`
LogPath string `yaml:"LogPath"`
}{
LogRotation: util.Bool(true),
LogLevel: "dev",
LogPath: "/var/tmp/xray.log",
},
}
// call loadlogconfig method under test
loglevel := "dev"
LoadLogConfig(&out, config, loglevel)
// exercise logger
switch testCase.Level {
case seelog.ErrorLvl:
if len(testCase.Params) > 0 {
seelog.Errorf(testCase.Message, testCase.Params...)
} else {
seelog.Error(testCase.Message)
}
case seelog.InfoLvl:
if len(testCase.Params) > 0 {
seelog.Infof(testCase.Message, testCase.Params...)
} else {
seelog.Info(testCase.Message)
}
case seelog.DebugLvl:
if len(testCase.Params) > 0 {
seelog.Debugf(testCase.Message, testCase.Params...)
} else {
seelog.Debug(testCase.Message)
}
case seelog.WarnLvl:
if len(testCase.Params) > 0 {
seelog.Warnf(testCase.Message, testCase.Params...)
} else {
seelog.Warn(testCase.Message)
}
case seelog.TraceLvl:
if len(testCase.Params) > 0 {
seelog.Tracef(testCase.Message, testCase.Params...)
} else {
seelog.Trace(testCase.Message)
}
case seelog.CriticalLvl:
if len(testCase.Params) > 0 {
seelog.Criticalf(testCase.Message, testCase.Params...)
} else {
seelog.Critical(testCase.Message)
}
default:
assert.Fail(t, "Unexpected log level", testCase.Level)
}
seelog.Flush()
// check result
assert.Equal(t, testCase.Output, out.String())
}
| 150 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package logger
import (
"io"
"github.com/aws/aws-xray-daemon/pkg/cfg"
log "github.com/cihub/seelog"
)
// LoadLogConfig configures Logger.
func LoadLogConfig(writer io.Writer, c *cfg.Config, loglevel string) {
var level log.LogLevel
switch c.Logging.LogLevel {
case "dev":
level = log.TraceLvl
case "debug":
level = log.DebugLvl
case "info":
level = log.InfoLvl
case "warn":
level = log.WarnLvl
case "error":
level = log.ErrorLvl
case "prod":
level = log.InfoLvl
}
if loglevel != c.Logging.LogLevel {
switch loglevel {
case "dev":
level = log.TraceLvl
case "debug":
level = log.DebugLvl
case "info":
level = log.InfoLvl
case "warn":
level = log.WarnLvl
case "error":
level = log.ErrorLvl
case "prod":
level = log.InfoLvl
}
}
logger, _ := log.LoggerFromWriterWithMinLevelAndFormat(writer, level, cfg.LogFormat)
log.ReplaceLogger(logger)
}
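// Illustrative sketch (not part of the original source): a typical call, writing to any
// io.Writer (the daemon decides elsewhere whether that is a log file or stdout). A recognized
// command-line level such as "debug" that differs from c.Logging.LogLevel takes precedence
// over the config file value.
func exampleLoadLogConfig(w io.Writer, c *cfg.Config) {
	LoadLogConfig(w, c, "debug")
}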
| 58 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package processor
import (
"github.com/aws/aws-xray-daemon/pkg/conn"
"github.com/aws/aws-xray-daemon/pkg/telemetry"
"github.com/aws/aws-xray-daemon/pkg/util/timer"
"math/rand"
"regexp"
"time"
"github.com/aws/aws-sdk-go/service/xray"
log "github.com/cihub/seelog"
)
var /* const */ segIdRegexp = regexp.MustCompile(`\"id\":\"(.*?)\"`)
var /* const */ traceIdRegexp = regexp.MustCompile(`\"trace_id\":\"(.*?)\"`)
// Structure for trace segments batch.
type segmentsBatch struct {
// Boolean channel set to true when processing the batch segments is done.
done chan bool
// String slice of trace segments.
batches chan []*string
// Instance of XRay, used to send data to X-Ray service.
xRay conn.XRay
// Random generator, used for back off logic in case of exceptions.
randGen *rand.Rand
// Instance of timer.
timer timer.Timer
}
func (s *segmentsBatch) send(batch []*string) {
select {
case s.batches <- batch:
default:
select {
case batchTruncated := <-s.batches:
telemetry.T.SegmentSpillover(int64(len(batchTruncated)))
log.Warnf("Spilling over %v segments", len(batchTruncated))
default:
log.Debug("Segment batch: channel is de-queued")
}
log.Debug("Segment batch: retrying batch")
s.send(batch)
}
}
func (s *segmentsBatch) poll() {
for {
batch, ok := <-s.batches
if ok {
params := &xray.PutTraceSegmentsInput{
TraceSegmentDocuments: batch,
}
start := time.Now()
// send segment to X-Ray service.
r, err := s.xRay.PutTraceSegments(params)
if err != nil {
telemetry.EvaluateConnectionError(err)
log.Errorf("Sending segment batch failed with: %v", err)
continue
} else {
telemetry.T.SegmentSent(int64(len(batch)))
}
elapsed := time.Since(start)
if len(r.UnprocessedTraceSegments) != 0 {
log.Infof("Sent batch of %d segments but had %d Unprocessed segments (%1.3f seconds)", len(batch),
len(r.UnprocessedTraceSegments), elapsed.Seconds())
batchesMap := make(map[string]string)
for i := 0; i < len(batch); i++ {
segIdStrs := segIdRegexp.FindStringSubmatch(*batch[i])
if len(segIdStrs) != 2 {
log.Debugf("Failed to match \"id\" in segment: %v", *batch[i])
continue
}
batchesMap[segIdStrs[1]] = *batch[i]
}
for _, unprocessedSegment := range r.UnprocessedTraceSegments {
telemetry.T.SegmentRejected(1)
// Print all segments since we don't know which exact one is invalid.
if unprocessedSegment.Id == nil {
log.Debugf("Received nil unprocessed segment id from X-Ray service: %v", unprocessedSegment)
log.Debugf("Content in this batch: %v", params)
break
}
traceIdStrs := traceIdRegexp.FindStringSubmatch(batchesMap[*unprocessedSegment.Id])
if len(traceIdStrs) != 2 {
log.Errorf("Unprocessed segment: %v", unprocessedSegment)
} else {
log.Errorf("Unprocessed trace %v, segment: %v", traceIdStrs[1], unprocessedSegment)
}
log.Debugf(batchesMap[*unprocessedSegment.Id])
}
} else {
log.Infof("Successfully sent batch of %d segments (%1.3f seconds)", len(batch), elapsed.Seconds())
}
} else {
log.Trace("Segment batch: done!")
s.done <- true
break
}
}
}
func (s *segmentsBatch) close() {
close(s.batches)
}
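// Illustrative sketch (not part of the original source): the intended lifecycle of a
// segmentsBatch. It assumes client is an already-configured conn.XRay, doc points to a valid
// segment document, and the telemetry package has been initialized elsewhere in the daemon.
func exampleSegmentsBatchLifecycle(client conn.XRay, doc *string) {
	s := &segmentsBatch{
		batches: make(chan []*string, 1),
		done:    make(chan bool),
		xRay:    client,
	}
	go s.poll()            // consumes batches and calls PutTraceSegments
	s.send([]*string{doc}) // enqueue one batch, spilling over if the channel is full
	s.close()              // closing the channel lets poll() drain and then signal done
	<-s.done
}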
| 124 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package processor
import (
"errors"
"fmt"
"strings"
"testing"
"github.com/aws/aws-sdk-go/service/xray"
"github.com/aws/aws-xray-daemon/pkg/util/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
var doneMsg = "Segment batch: done!"
type MockXRayClient struct {
mock.Mock
CallNoToPutTraceSegments int
input *xray.PutTraceSegmentsInput
}
func (c *MockXRayClient) PutTraceSegments(input *xray.PutTraceSegmentsInput) (*xray.PutTraceSegmentsOutput, error) {
c.input = input
c.CallNoToPutTraceSegments++
args := c.Called(nil)
errorStr := args.String(0)
var err error
output := &xray.PutTraceSegmentsOutput{}
if errorStr == "Send unprocessed" {
segmentID := "Test-Segment-Id-1242113"
output.UnprocessedTraceSegments = append(output.UnprocessedTraceSegments, &xray.UnprocessedTraceSegment{Id: &segmentID})
} else if errorStr == "Send Invalid" {
output.UnprocessedTraceSegments = append(output.UnprocessedTraceSegments, &xray.UnprocessedTraceSegment{Id: nil})
} else if errorStr != "" {
err = errors.New(errorStr)
}
return output, err
}
func (c *MockXRayClient) PutTelemetryRecords(input *xray.PutTelemetryRecordsInput) (*xray.PutTelemetryRecordsOutput, error) {
return nil, nil
}
func TestSendOneBatch(t *testing.T) {
s := segmentsBatch{
batches: make(chan []*string, 1),
}
testMessage := "Test Message"
batch := []*string{&testMessage}
s.send(batch)
returnedBatch := <-s.batches
assert.EqualValues(t, len(returnedBatch), 1)
batchString := *returnedBatch[0]
assert.EqualValues(t, batchString, testMessage)
}
func TestSendBatchChannelTruncate(t *testing.T) {
log := test.LogSetup()
s := segmentsBatch{
batches: make(chan []*string, 1),
}
testMessage := "Test Message"
batch := []*string{&testMessage}
testMessage2 := "Test Message 2"
batch2 := []*string{&testMessage2}
s.send(batch)
s.send(batch2)
returnedBatch := <-s.batches
assert.EqualValues(t, len(returnedBatch), 1)
assert.EqualValues(t, *returnedBatch[0], testMessage2)
assert.True(t, strings.Contains(log.Logs[0], "Spilling over"))
assert.True(t, strings.Contains(log.Logs[1], "retrying batch"))
}
func TestPollSendSuccess(t *testing.T) {
log := test.LogSetup()
xRay := new(MockXRayClient)
xRay.On("PutTraceSegments", nil).Return("").Once()
s := segmentsBatch{
batches: make(chan []*string, 1),
xRay: xRay,
done: make(chan bool),
}
testMessage := "{\"id\":\"9472\""
batch := []*string{&testMessage}
s.send(batch)
go s.poll()
close(s.batches)
<-s.done
assert.EqualValues(t, xRay.CallNoToPutTraceSegments, 1)
assert.True(t, strings.Contains(log.Logs[0], fmt.Sprintf("Successfully sent batch of %v", 1)))
assert.True(t, strings.Contains(log.Logs[1], doneMsg))
}
func TestPutTraceSegmentsParameters(t *testing.T) {
log := test.LogSetup()
xRay := new(MockXRayClient)
xRay.On("PutTraceSegments", nil).Return("").Once()
s := segmentsBatch{
batches: make(chan []*string, 1),
xRay: xRay,
done: make(chan bool),
}
testMessage := "{\"id\":\"9472\""
batch := []*string{&testMessage}
s.send(batch)
go s.poll()
close(s.batches)
<-s.done
actualInput := xRay.input
expectedInput := &xray.PutTraceSegmentsInput{
TraceSegmentDocuments: batch,
}
assert.EqualValues(t, actualInput, expectedInput)
assert.EqualValues(t, xRay.CallNoToPutTraceSegments, 1)
assert.True(t, strings.Contains(log.Logs[0], fmt.Sprintf("Successfully sent batch of %v", 1)))
assert.True(t, strings.Contains(log.Logs[1], doneMsg))
}
func TestPollSendReturnUnprocessed(t *testing.T) {
log := test.LogSetup()
xRay := new(MockXRayClient)
xRay.On("PutTraceSegments", nil).Return("Send unprocessed").Once()
s := segmentsBatch{
batches: make(chan []*string, 1),
xRay: xRay,
done: make(chan bool),
}
testMessage := "{\"id\":\"9472\""
batch := []*string{&testMessage}
s.send(batch)
go s.poll()
close(s.batches)
<-s.done
assert.EqualValues(t, xRay.CallNoToPutTraceSegments, 1)
assert.True(t, strings.Contains(log.Logs[0], fmt.Sprintf("Sent batch of %v segments but had %v Unprocessed segments", 1, 1)))
assert.True(t, strings.Contains(log.Logs[1], "Unprocessed segment"))
}
func TestPollSendReturnUnprocessedInvalid(t *testing.T) {
log := test.LogSetup()
xRay := new(MockXRayClient)
xRay.On("PutTraceSegments", nil).Return("Send Invalid").Once()
s := segmentsBatch{
batches: make(chan []*string, 1),
xRay: xRay,
done: make(chan bool),
}
testMessage := "{\"id\":\"9472\""
batch := []*string{&testMessage}
s.send(batch)
go s.poll()
close(s.batches)
<-s.done
assert.EqualValues(t, xRay.CallNoToPutTraceSegments, 1)
assert.True(t, strings.Contains(log.Logs[0], fmt.Sprintf("Sent batch of %v segments but had %v Unprocessed segments", 1, 1)))
assert.True(t, strings.Contains(log.Logs[1], "Received nil unprocessed segment id from X-Ray service"))
}
| 186 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package processor
import (
"sync/atomic"
"time"
log "github.com/cihub/seelog"
"github.com/aws/aws-xray-daemon/pkg/bufferpool"
"github.com/aws/aws-xray-daemon/pkg/ringbuffer"
"github.com/aws/aws-xray-daemon/pkg/tracesegment"
"github.com/aws/aws-xray-daemon/pkg/cfg"
"github.com/aws/aws-xray-daemon/pkg/conn"
"github.com/aws/aws-xray-daemon/pkg/util/timer"
"math/rand"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
)
// Processor buffers segments and sends them to the X-Ray service.
type Processor struct {
	// Boolean channel, set to true when the processor has no segments left in the standard ring buffer.
Done chan bool
// Ring buffer to store trace segments.
std *ringbuffer.RingBuffer
// Buffer pool instance.
pool *bufferpool.BufferPool
// Counter for segments received.
count uint64
// timer client used for setting idle timer.
timerClient timer.Timer
// segmentsBatch is used to process received segments batch.
traceSegmentsBatch *segmentsBatch
// Number of go routines to spawn for traceSegmentsBatch.poll().
batchProcessorCount int
	// Channel that fires when the idle timer elapses.
	idleTimer <-chan time.Time
	// Size of the segment batches processed by Processor.
	batchSize int
	// Idle timeout after which a partially filled batch is sent.
	sendIdleTimeout time.Duration
}
// New creates new instance of Processor.
func New(awsConfig *aws.Config, s *session.Session, segmentBatchProcessorCount int, std *ringbuffer.RingBuffer,
pool *bufferpool.BufferPool, c *cfg.ParameterConfig) *Processor {
batchesChan := make(chan []*string, c.Processor.BatchProcessorQueueSize)
segmentBatchDoneChan := make(chan bool)
tsb := &segmentsBatch{
batches: batchesChan,
done: segmentBatchDoneChan,
randGen: rand.New(rand.NewSource(time.Now().UnixNano())),
timer: &timer.Client{},
}
x := conn.NewXRay(awsConfig, s)
if x == nil {
log.Error("X-Ray client returned nil")
os.Exit(1)
}
tsb.xRay = x
doneChan := make(chan bool)
log.Debugf("Batch size: %v", c.Processor.BatchSize)
p := &Processor{
Done: doneChan,
std: std,
pool: pool,
count: 0,
timerClient: &timer.Client{},
batchProcessorCount: segmentBatchProcessorCount,
traceSegmentsBatch: tsb,
batchSize: c.Processor.BatchSize,
sendIdleTimeout: time.Millisecond * time.Duration(c.Processor.IdleTimeoutMillisecond),
}
for i := 0; i < p.batchProcessorCount; i++ {
go p.traceSegmentsBatch.poll()
}
go p.poll()
return p
}
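// Pipeline sketch (added commentary, not part of the original source): New wires the
// components roughly as
//
//	ring buffer -> Processor.poll -> traceSegmentsBatch.batches -> N x segmentsBatch.poll -> X-Ray PutTraceSegments
//
// where N is segmentBatchProcessorCount. Processor.Done is signalled only after the ring
// buffer is drained and every batch poller has reported on traceSegmentsBatch.done.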
func (p *Processor) poll() {
batch := make([]*tracesegment.TraceSegment, 0, p.batchSize)
p.SetIdleTimer()
for {
select {
case segment, ok := <-p.std.Channel:
if ok {
batch = p.receiveTraceSegment(segment, batch)
} else {
p.std.Empty = true
}
case <-p.idleTimer:
if len(batch) > 0 {
log.Debug("processor: sending partial batch")
batch = p.sendBatchAsync(batch)
} else {
p.SetIdleTimer()
}
}
if p.std.Empty {
break
}
}
if len(batch) > 0 {
batch = p.sendBatchAsync(batch)
}
p.traceSegmentsBatch.close()
for i := 0; i < p.batchProcessorCount; i++ {
<-p.traceSegmentsBatch.done
}
log.Debug("processor: done!")
p.Done <- true
}
func (p *Processor) receiveTraceSegment(ts *tracesegment.TraceSegment, batch []*tracesegment.TraceSegment) []*tracesegment.TraceSegment {
atomic.AddUint64(&p.count, 1)
batch = append(batch, ts)
if len(batch) >= p.batchSize {
log.Debug("processor: sending complete batch")
batch = p.sendBatchAsync(batch)
} else if p.pool.CurrentBuffersLen() == 0 {
log.Debug("processor: sending partial batch due to load on buffer pool")
batch = p.sendBatchAsync(batch)
}
return batch
}
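// Added commentary on receiveTraceSegment above (illustrative): a batch is flushed early in
// exactly two cases; otherwise segments keep accumulating until the idle timer fires in poll().
// Assuming batchSize is 50:
//
//	len(batch) == 50              -> "sending complete batch"
//	pool.CurrentBuffersLen() == 0 -> "sending partial batch due to load on buffer pool"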
// Re-slicing does not copy the underlying array, so segment pointers left in it would never
// be garbage collected; flushBatch therefore nils out each element before resetting the
// slice length to zero. (http://blog.golang.org/go-slices-usage-and-internals)
func (p *Processor) flushBatch(batch []*tracesegment.TraceSegment) []*tracesegment.TraceSegment {
for i := 0; i < len(batch); i++ {
batch[i] = nil
}
batch = batch[0:0]
return batch
}
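// Minimal sketch of the nil-then-reslice pattern used above (hypothetical example, not part
// of the daemon itself):
//
//	batch := make([]*tracesegment.TraceSegment, 0, 4)
//	seg := tracesegment.GetTestTraceSegment()
//	batch = append(batch, &seg) // backing array now holds a pointer to seg
//	batch[0] = nil              // drop the reference so seg can be garbage collected
//	batch = batch[:0]           // reuse the same backing array for the next batch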
func (p *Processor) sendBatchAsync(batch []*tracesegment.TraceSegment) []*tracesegment.TraceSegment {
log.Debugf("processor: segment batch size: %d. capacity: %d", len(batch), cap(batch))
segmentDocuments := []*string{}
for _, segment := range batch {
rawBytes := *segment.Raw
x := string(rawBytes[:])
segmentDocuments = append(segmentDocuments, &x)
p.pool.Return(segment.PoolBuf)
}
p.traceSegmentsBatch.send(segmentDocuments)
// Reset Idle Timer
p.SetIdleTimer()
return p.flushBatch(batch)
}
// ProcessedCount returns the number of trace segments received.
func (p *Processor) ProcessedCount() uint64 {
return atomic.LoadUint64(&p.count)
}
// SetIdleTimer sets idle timer for the processor instance.
func (p *Processor) SetIdleTimer() {
p.idleTimer = p.timerClient.After(p.sendIdleTimeout)
}
| 193 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package processor
import (
"fmt"
"github.com/aws/aws-xray-daemon/pkg/bufferpool"
"github.com/aws/aws-xray-daemon/pkg/ringbuffer"
"github.com/aws/aws-xray-daemon/pkg/telemetry"
"github.com/aws/aws-xray-daemon/pkg/tracesegment"
"github.com/aws/aws-xray-daemon/pkg/util/test"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func init() {
telemetry.T = telemetry.GetTestTelemetry()
}
func TestFlushBatch(t *testing.T) {
variousTests := []int{0, 10, 100, 324}
for _, testCase := range variousTests {
processor := Processor{}
segments := make([]*tracesegment.TraceSegment, testCase)
for i := 0; i < testCase; i++ {
segmentVal := tracesegment.GetTestTraceSegment()
segments[i] = &segmentVal
}
segmentsFlushed := processor.flushBatch(segments)
assert.Equal(t, len(segmentsFlushed), 0)
assert.Equal(t, cap(segmentsFlushed), testCase)
for _, segmentVal := range segmentsFlushed {
assert.Nil(t, segmentVal)
}
}
}
func TestSendBatchSuccess(t *testing.T) {
timer := test.MockTimerClient{}
variousTests := []int{0, 50, 40}
for _, testCase := range variousTests {
writer := test.LogSetup()
segments := make([]*tracesegment.TraceSegment, testCase)
for i := 0; i < testCase; i++ {
segmentVal := tracesegment.GetTestTraceSegment()
segments[i] = &segmentVal
}
processor := Processor{
pool: bufferpool.Init(testCase+1, 100),
timerClient: &timer,
traceSegmentsBatch: &segmentsBatch{
batches: make(chan []*string, 1),
},
}
// Empty Pool
for i := 0; i < testCase+1; i++ {
processor.pool.Get()
}
assert.EqualValues(t, processor.pool.CurrentBuffersLen(), 0)
returnedSegment := processor.sendBatchAsync(segments)
assert.EqualValues(t, cap(returnedSegment), cap(segments))
assert.EqualValues(t, len(returnedSegment), 0)
for _, segmentVal := range returnedSegment {
assert.Nil(t, segmentVal)
}
assert.True(t, strings.Contains(writer.Logs[0], fmt.Sprintf("segment batch size: %v", testCase)))
select {
case batch := <-processor.traceSegmentsBatch.batches:
assert.NotNil(t, batch)
default:
assert.Fail(t, "Expected batch to be in batch channel")
}
// Asserting the buffer pool was returned
assert.EqualValues(t, processor.pool.CurrentBuffersLen(), testCase)
}
}
func TestPollingFewSegmentsExit(t *testing.T) {
pool := bufferpool.Init(1, 100)
stdChan := ringbuffer.New(20, pool)
doneChan := make(chan bool)
timer := &test.MockTimerClient{}
writer := test.LogSetup()
processor := &Processor{
timerClient: timer,
std: stdChan,
count: 0,
Done: doneChan,
pool: pool,
traceSegmentsBatch: &segmentsBatch{
batches: make(chan []*string, 1),
},
sendIdleTimeout: time.Second,
batchSize: 50,
}
go processor.poll()
	// Advance the mock timer so the batch send can proceed
timer.Advance(time.Duration(10))
segment := tracesegment.GetTestTraceSegment()
stdChan.Send(&segment)
stdChan.Close()
<-processor.Done
assert.EqualValues(t, processor.ProcessedCount(), 1)
assert.True(t, strings.Contains(writer.Logs[0], "segment batch size: 1"))
assert.True(t, strings.Contains(writer.Logs[1], "processor: done!"))
}
func TestPollingFewSegmentsIdleTimeout(t *testing.T) {
pool := bufferpool.Init(1, 100)
stdChan := ringbuffer.New(20, pool)
doneChan := make(chan bool)
timer := &test.MockTimerClient{}
writer := test.LogSetup()
processor := &Processor{
timerClient: timer,
std: stdChan,
count: 0,
Done: doneChan,
pool: pool,
traceSegmentsBatch: &segmentsBatch{
batches: make(chan []*string, 1),
},
sendIdleTimeout: time.Second,
batchSize: 50,
}
go processor.poll()
	// Sleep to let the poll goroutine initialize
	time.Sleep(time.Millisecond)
	// Add a segment to the standard channel
segment := tracesegment.GetTestTraceSegment()
stdChan.Send(&segment)
	// Sleep to ensure the channel send is processed before the timeout is triggered
	time.Sleep(time.Millisecond)
	// Trigger the idle timeout so that the batch is sent via PutTraceSegments
	timer.Advance(processor.sendIdleTimeout)
	time.Sleep(time.Millisecond)
	// The sleep above lets the timer-triggered batch send run before the channel is closed
stdChan.Close()
<-doneChan
assert.True(t, strings.Contains(writer.Logs[0], "sending partial batch"))
assert.True(t, strings.Contains(writer.Logs[1], "segment batch size: 1"))
assert.True(t, strings.Contains(writer.Logs[2], "processor: done!"))
}
func TestPollingBatchBufferFull(t *testing.T) {
batchSize := 50
pool := bufferpool.Init(1, 100)
// Setting stdChan to batchSize so that it does not spill over
stdChan := ringbuffer.New(batchSize, pool)
doneChan := make(chan bool)
timer := &test.MockTimerClient{}
writer := test.LogSetup()
segmentProcessorCount := 1
processor := &Processor{
timerClient: timer,
std: stdChan,
count: 0,
Done: doneChan,
batchProcessorCount: segmentProcessorCount,
pool: pool,
traceSegmentsBatch: &segmentsBatch{
batches: make(chan []*string, 1),
done: make(chan bool),
},
batchSize: batchSize,
}
go processor.poll()
for i := 0; i < batchSize; i++ {
		// Add a segment to the standard channel
segment := tracesegment.GetTestTraceSegment()
stdChan.Send(&segment)
}
stdChan.Close()
processor.traceSegmentsBatch.done <- true
<-doneChan
assert.EqualValues(t, processor.ProcessedCount(), batchSize)
assert.True(t, strings.Contains(writer.Logs[0], "sending complete batch"))
assert.True(t, strings.Contains(writer.Logs[1], fmt.Sprintf("segment batch size: %v", batchSize)))
assert.True(t, strings.Contains(writer.Logs[2], "processor: done!"))
}
func TestPollingBufferPoolExhaustedForcingSent(t *testing.T) {
pool := bufferpool.Init(1, 100)
batchSize := 50
// Exhaust the buffer pool
pool.Get()
assert.EqualValues(t, pool.CurrentBuffersLen(), 0)
stdChan := ringbuffer.New(batchSize, pool)
doneChan := make(chan bool)
timer := &test.MockTimerClient{}
writer := test.LogSetup()
segmentProcessorCount := 1
processor := &Processor{
timerClient: timer,
std: stdChan,
count: 0,
Done: doneChan,
batchProcessorCount: segmentProcessorCount,
pool: pool,
traceSegmentsBatch: &segmentsBatch{
batches: make(chan []*string, 1),
done: make(chan bool),
},
sendIdleTimeout: time.Second,
batchSize: batchSize,
}
go processor.poll()
segment := tracesegment.GetTestTraceSegment()
stdChan.Send(&segment)
stdChan.Close()
processor.traceSegmentsBatch.done <- true
<-doneChan
assert.EqualValues(t, processor.ProcessedCount(), 1)
assert.True(t, strings.Contains(writer.Logs[0], "sending partial batch due to load on buffer pool"))
assert.True(t, strings.Contains(writer.Logs[1], fmt.Sprintf("segment batch size: %v", 1)))
assert.True(t, strings.Contains(writer.Logs[2], "processor: done!"))
}
func TestPollingIdleTimerIsInitiatedAfterElapseWithNoSegments(t *testing.T) {
timer := &test.MockTimerClient{}
pool := bufferpool.Init(1, 100)
batchSize := 50
stdChan := ringbuffer.New(batchSize, pool)
processor := &Processor{
Done: make(chan bool),
timerClient: timer,
std: stdChan,
pool: pool,
traceSegmentsBatch: &segmentsBatch{
batches: make(chan []*string, 1),
},
sendIdleTimeout: time.Second,
batchSize: batchSize,
}
go processor.poll()
// Sleep for routine to be initiated
time.Sleep(time.Millisecond)
// Trigger Idle Timeout
timer.Advance(processor.sendIdleTimeout)
	// Sleep so that the routine runs after the timeout is triggered
time.Sleep(time.Millisecond)
stdChan.Close()
<-processor.Done
	// Called twice: once at poll start and once after the timeout was triggered
assert.EqualValues(t, timer.AfterCalledTimes(), 2)
}
| 284 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package profiler
import (
"os"
"runtime/pprof"
log "github.com/cihub/seelog"
)
// EnableCPUProfile enables CPU profiling.
func EnableCPUProfile(cpuProfile *string) {
	if *cpuProfile != "" {
		f, err := os.Create(*cpuProfile)
		if err != nil {
			log.Errorf("error: %v", err)
			return
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Errorf("error starting CPU profile: %v", err)
			return
		}
		log.Info("Start CPU Profiling")
	}
}
// MemSnapShot creates memory profile.
func MemSnapShot(memProfile *string) {
	if *memProfile != "" {
		f, err := os.Create(*memProfile)
		if err != nil {
			log.Errorf("Could not create memory profile: %v", err)
			return
		}
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Errorf("Could not write memory profile: %v", err)
		}
		if err := f.Close(); err != nil {
			log.Errorf("unable to close file: %v", err)
		}
		log.Info("Finish memory profiling")
	}
}
| 49 |
aws-xray-daemon | aws | Go | // Package proxy provides an http server to act as a signing proxy for SDKs calling AWS X-Ray APIs
package proxy
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-xray-daemon/pkg/cfg"
"github.com/aws/aws-xray-daemon/pkg/conn"
log "github.com/cihub/seelog"
)
const service = "xray"
const connHeader = "Connection"
// Server represents HTTP server.
type Server struct {
*http.Server
}
// NewServer returns a proxy server listening on the given address.
// Requests are forwarded to the endpoint in the given config.
// Requests are signed using credentials from the given config.
func NewServer(cfg *cfg.Config, awsCfg *aws.Config, sess *session.Session) (*Server, error) {
_, err := net.ResolveTCPAddr("tcp", cfg.Socket.TCPAddress)
if err != nil {
log.Errorf("%v", err)
os.Exit(1)
}
endPoint, er := getServiceEndpoint(awsCfg)
if er != nil {
return nil, fmt.Errorf("%v", er)
}
log.Infof("HTTP Proxy server using X-Ray Endpoint : %v", endPoint)
// Parse url from endpoint
url, err := url.Parse(endPoint)
if err != nil {
return nil, fmt.Errorf("unable to parse xray endpoint: %v", err)
}
signer := &v4.Signer{
Credentials: sess.Config.Credentials,
}
transport := conn.ProxyServerTransport(cfg)
// Reverse proxy handler
handler := &httputil.ReverseProxy{
Transport: transport,
// Handler for modifying and forwarding requests
Director: func(req *http.Request) {
if req != nil && req.URL != nil {
log.Debugf("Received request on HTTP Proxy server : %s", req.URL.String())
} else {
log.Debug("Request/Request.URL received on HTTP Proxy server is nil")
}
// Remove connection header before signing request, otherwise the
// reverse-proxy will remove the header before forwarding to X-Ray
// resulting in a signed header being missing from the request.
req.Header.Del(connHeader)
// Set req url to xray endpoint
req.URL.Scheme = url.Scheme
req.URL.Host = url.Host
req.Host = url.Host
// Consume body and convert to io.ReadSeeker for signer to consume
body, err := consume(req.Body)
if err != nil {
log.Errorf("Unable to consume request body: %v", err)
// Forward unsigned request
return
}
// Sign request. signer.Sign() also repopulates the request body.
_, err = signer.Sign(req, body, service, *awsCfg.Region, time.Now())
if err != nil {
log.Errorf("Unable to sign request: %v", err)
}
},
}
server := &http.Server{
Addr: cfg.Socket.TCPAddress,
Handler: handler,
}
p := &Server{server}
return p, nil
}
// consume reads the entire body and creates a new io.ReadSeeker from the content. v4.Signer
// requires an io.ReadSeeker to be able to sign requests. May return a nil io.ReadSeeker.
func consume(body io.ReadCloser) (io.ReadSeeker, error) {
var buf []byte
// Return nil ReadSeeker if body is nil
if body == nil {
return nil, nil
}
// Consume body
buf, err := ioutil.ReadAll(body)
if err != nil {
return nil, err
}
return bytes.NewReader(buf), nil
}
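// Usage sketch for consume (illustrative only):
//
//	rc := ioutil.NopCloser(strings.NewReader("Body"))
//	rs, err := consume(rc) // rs is an io.ReadSeeker over the same bytes
//	if err == nil && rs != nil {
//		rs.Seek(0, io.SeekStart) // the signer can rewind and hash the payload
//	}
//
// A nil body yields a nil ReadSeeker, and the request is then signed without a payload.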
// Serve starts server.
func (s *Server) Serve() {
log.Infof("Starting proxy http server on %s", s.Addr)
if err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed {
log.Errorf("proxy http server failed to listen: %v", err)
}
}
// Close stops server.
func (s *Server) Close() {
err := s.Server.Close()
if err != nil {
log.Errorf("unable to close the server: %v", err)
}
}
// getServiceEndpoint returns the X-Ray service endpoint.
// The caller guarantees that awsCfg is non-nil; if no endpoint is configured, the endpoint
// is resolved from the region, which must then be non-nil and non-empty.
func getServiceEndpoint(awsCfg *aws.Config) (string, error) {
if awsCfg.Endpoint == nil || *awsCfg.Endpoint == "" {
if awsCfg.Region == nil || *awsCfg.Region == "" {
return "", errors.New("unable to generate endpoint from region with nil value")
}
resolved, err := endpoints.DefaultResolver().EndpointFor(service, *awsCfg.Region, setResolverConfig())
if err != nil {
return "", err
}
return resolved.URL, err
}
return *awsCfg.Endpoint, nil
}
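// Worked example (illustrative, mirroring the tests that follow): with no explicit endpoint,
// the default resolver derives the URL from the region, and an explicit awsCfg.Endpoint
// always takes priority.
//
//	Region "us-west-1"  -> "https://xray.us-west-1.amazonaws.com"
//	Region "cn-north-1" -> "https://xray.cn-north-1.amazonaws.com.cn"
//	Region "" or nil    -> error: unable to generate endpoint from region with nil value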
func setResolverConfig() func(*endpoints.Options) {
return func(p *endpoints.Options) {
p.ResolveUnknownService = true
}
}
| 170 |
aws-xray-daemon | aws | Go | package proxy
import (
"io"
"io/ioutil"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-xray-daemon/pkg/cfg"
"github.com/stretchr/testify/assert"
)
// Assert that consume returns a ReadSeeker with the same content as the
// ReadCloser passed in.
func TestConsume(t *testing.T) {
// Create an io.Reader
r := strings.NewReader("Content")
// Create an io.ReadCloser
rc := ioutil.NopCloser(r)
// Consume ReadCloser and create ReadSeeker
rs, err := consume(rc)
assert.Nil(t, err)
// Read from ReadSeeker
bytes, err := ioutil.ReadAll(rs)
assert.Nil(t, err)
// Assert contents of bytes are same as contents of original Reader
assert.Equal(t, "Content", string(bytes))
}
// Assert that consume returns a nil ReadSeeker when a nil ReadCloser is passed in
func TestConsumeNilBody(t *testing.T) {
// Create a nil io.ReadCloser
var rc io.ReadCloser
// Consume ReadCloser and create ReadSeeker
rs, err := consume(rc)
assert.Nil(t, err)
assert.Nil(t, rs)
}
// Assert that Director modifies the passed in http.Request
func TestDirector(t *testing.T) {
// Create dummy credentials to sign with
cred := credentials.NewStaticCredentials("id", "secret", "token")
// Create dummy aws Config
awsCfg := &aws.Config{
Endpoint: aws.String("https://xray.us-east-1.amazonaws.com"),
Region: aws.String("us-east-1"),
Credentials: cred,
}
// Create dummy aws Session
sess := &session.Session{
Config: awsCfg,
}
// Create proxy server
s, err := NewServer(cfg.DefaultConfig(), awsCfg, sess)
assert.Nil(t, err)
// Extract director from server
d := s.Handler.(*httputil.ReverseProxy).Director
// Create http request to pass to director
url, err := url.Parse("http://127.0.0.1:2000")
assert.Nil(t, err)
header := map[string][]string{
"Connection": []string{},
}
req := &http.Request{
URL: url,
Host: "127.0.0.1",
Header: header,
Body: ioutil.NopCloser(strings.NewReader("Body")),
}
// Apply director to request
d(req)
// Assert that the url was changed to point to AWS X-Ray
assert.Equal(t, "https", req.URL.Scheme)
assert.Equal(t, "xray.us-east-1.amazonaws.com", req.URL.Host)
assert.Equal(t, "xray.us-east-1.amazonaws.com", req.Host)
// Assert that additional headers were added by the signer
assert.Contains(t, req.Header, "Authorization")
assert.Contains(t, req.Header, "X-Amz-Security-Token")
assert.Contains(t, req.Header, "X-Amz-Date")
assert.NotContains(t, req.Header, "Connection")
}
// Fetching endpoint from aws config instance
func TestEndpoint1(t *testing.T) {
e := "https://xray.us-east-1.amazonaws.com"
awsCfg := &aws.Config{
Endpoint: aws.String(e), // Endpoint value has higher priority than region value
Region: aws.String("us-west-1"),
}
result, err := getServiceEndpoint(awsCfg)
assert.Equal(t, e, result, "Fetching endpoint from config instance")
assert.Nil(t, err)
}
// Generating endpoint from region value of awsCfg instance
func TestEndpoint2(t *testing.T) {
e := "https://xray.us-west-1.amazonaws.com"
awsCfg := &aws.Config{
Region: aws.String("us-west-1"), // No endpoint
}
result, err := getServiceEndpoint(awsCfg)
assert.Equal(t, e, result, "Fetching endpoint from region")
assert.Nil(t, err)
}
// Error received when no endpoint and region value present in awsCfg instance
func TestEndpoint3(t *testing.T) {
awsCfg := &aws.Config{
// No endpoint and region value
}
result, err := getServiceEndpoint(awsCfg)
assert.Equal(t, "", result, "Endpoint cannot be created")
assert.NotNil(t, err)
}
func TestEndpoint4(t *testing.T) {
awsCfg := &aws.Config{
// region value set to ""
Region: aws.String(""),
}
result, err := getServiceEndpoint(awsCfg)
assert.Equal(t, "", result, "Endpoint cannot be created")
assert.NotNil(t, err)
}
func TestEndpoint5(t *testing.T) {
e := "https://xray.us-west-1.amazonaws.com"
awsCfg := &aws.Config{
Endpoint: aws.String(""), // Endpoint set to ""
Region: aws.String("us-west-1"), // No endpoint
}
result, err := getServiceEndpoint(awsCfg)
assert.Equal(t, e, result, "Endpoint created from region value")
assert.Nil(t, err)
}
// Testing AWS China partition
func TestEndpoint6(t *testing.T) {
e := "https://xray.cn-northwest-1.amazonaws.com.cn"
awsCfg := &aws.Config{
Endpoint: aws.String(""),
Region: aws.String("cn-northwest-1"),
}
result, err := getServiceEndpoint(awsCfg)
assert.Equal(t, e, result, "creating endpoint from region")
assert.Nil(t, err)
}
// Testing AWS China partition
func TestEndpoint7(t *testing.T) {
e := "https://xray.cn-north-1.amazonaws.com.cn"
awsCfg := &aws.Config{
Endpoint: aws.String(""),
Region: aws.String("cn-north-1"),
}
result, err := getServiceEndpoint(awsCfg)
assert.Equal(t, e, result, "creating endpoint from region")
assert.Nil(t, err)
}
// Testing AWS Gov partition
func TestEndpoint8(t *testing.T) {
e := "https://xray.us-gov-east-1.amazonaws.com"
awsCfg := &aws.Config{
Endpoint: aws.String(""),
Region: aws.String("us-gov-east-1"),
}
result, err := getServiceEndpoint(awsCfg)
assert.Equal(t, e, result, "creating endpoint from region")
assert.Nil(t, err)
}
// Testing AWS Gov partition
func TestEndpoint9(t *testing.T) {
e := "https://xray.us-gov-west-1.amazonaws.com"
awsCfg := &aws.Config{
Endpoint: aws.String(""),
Region: aws.String("us-gov-west-1"),
}
result, err := getServiceEndpoint(awsCfg)
assert.Equal(t, e, result, "creating endpoint from region")
assert.Nil(t, err)
}
| 206 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package ringbuffer
import (
log "github.com/cihub/seelog"
"os"
"github.com/aws/aws-xray-daemon/pkg/bufferpool"
"github.com/aws/aws-xray-daemon/pkg/telemetry"
"github.com/aws/aws-xray-daemon/pkg/tracesegment"
)
var defaultCapacity = 250
// RingBuffer is used to store trace segments received on the X-Ray daemon address.
type RingBuffer struct {
	// Channel used to store trace segments received on the X-Ray daemon address.
	Channel <-chan *tracesegment.TraceSegment
	c       chan *tracesegment.TraceSegment
	// Boolean, set to true if the buffer is empty
	Empty bool
// Counter for trace segments truncated.
count uint64
// Reference to BufferPool.
pool *bufferpool.BufferPool
}
// New returns new instance of RingBuffer configured with BufferPool pool.
func New(bufferCount int, pool *bufferpool.BufferPool) *RingBuffer {
if bufferCount == 0 {
log.Error("The initial size of a queue should be larger than 0")
os.Exit(1)
}
capacity := getChannelSize(bufferCount)
channel := make(chan *tracesegment.TraceSegment, capacity)
return &RingBuffer{
Channel: channel,
c: channel,
Empty: false,
count: 0,
pool: pool,
}
}
// getChannelSize returns the size of the channel used by RingBuffer.
// Currently the channel size is 1x the total number of buffers allocated for the X-Ray daemon.
// It is proportional to the number of buffers because segments are dropped once no new buffer can be allocated.
// The function returns max(defaultCapacity, bufferCount).
func getChannelSize(bufferCount int) int {
capacity := 1 * bufferCount
if capacity < defaultCapacity {
return defaultCapacity
}
return capacity
}
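// Worked example (illustrative), with defaultCapacity = 250:
//
//	getChannelSize(100)  == 250  // clamped up to the default capacity
//	getChannelSize(250)  == 250
//	getChannelSize(1000) == 1000 // grows with the buffer count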
// Send sends trace segment s to trace segment channel.
func (r *RingBuffer) Send(s *tracesegment.TraceSegment) {
select {
case r.c <- s:
default:
var segmentTruncated *tracesegment.TraceSegment
select {
case segmentTruncated = <-r.c:
r.count++
r.pool.Return(segmentTruncated.PoolBuf)
log.Warn("Segment buffer is full. Dropping oldest segment document.")
telemetry.T.SegmentSpillover(1)
default:
log.Trace("Buffers: channel was de-queued")
}
r.Send(s)
}
}
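// Behavioural sketch for Send (added commentary): Send never blocks. When the channel is
// full, the oldest queued segment is dropped, its buffer is returned to the pool, the drop
// is counted as spillover, and Send retries with the new segment. Assuming a channel of
// capacity 2:
//
//	r.Send(s1); r.Send(s2) // channel holds [s1, s2]
//	r.Send(s3)             // s1 is dropped as spillover; channel holds [s2, s3]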
// Close closes the RingBuffer.
func (r *RingBuffer) Close() {
close(r.c)
}
// TruncatedCount returns trace segment truncated count.
func (r *RingBuffer) TruncatedCount() uint64 {
return r.count
}
| 98 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package ringbuffer
import (
"math/rand"
"os"
"os/exec"
"strings"
"testing"
"github.com/aws/aws-xray-daemon/pkg/bufferpool"
"github.com/aws/aws-xray-daemon/pkg/telemetry"
"github.com/aws/aws-xray-daemon/pkg/tracesegment"
"github.com/aws/aws-xray-daemon/pkg/util/test"
"github.com/stretchr/testify/assert"
)
func init() {
telemetry.T = telemetry.GetTestTelemetry()
}
func TestRingBufferNewWithZeroCapacity(t *testing.T) {
bufferLimit := 100
bufferSize := 256 * 1024
bufferPool := bufferpool.Init(bufferLimit, bufferSize)
// Only run the failing part when a specific env variable is set
if os.Getenv("Test_New") == "1" {
New(0, bufferPool)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=TestRingBufferNewWithZeroCapacity")
cmd.Env = append(os.Environ(), "Test_New=1")
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
// Check that the program exited
err := cmd.Wait()
if e, ok := err.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", err)
}
}
func TestRingBufferNewWithDefaultCapacity(t *testing.T) {
bufferLimit := 100
bufferSize := 256 * 1024
bufferPool := bufferpool.Init(bufferLimit, bufferSize)
randomFlag := rand.Intn(defaultCapacity - 1)
ringBuffer := New(randomFlag, bufferPool) // Ring buffer initialized with less than default capacity value
assert.Equal(t, defaultCapacity, cap(ringBuffer.c), "The size of buffered channel should be equal to default capacity")
assert.Equal(t, defaultCapacity, cap(ringBuffer.Channel), "The size of buffered channel should be equal to default capacity")
assert.Equal(t, false, ringBuffer.Empty, "The ringBuffer is not empty")
assert.Equal(t, uint64(0), ringBuffer.count, "The truncated count should be 0")
assert.Equal(t, bufferPool, ringBuffer.pool, "The value of bufferpool should be same with the given value")
}
func TestRingBufferNew(t *testing.T) { // RingBuffer size greater than defaultCapacity
bufferLimit := 100
bufferSize := 256 * 1024
bufferPool := bufferpool.Init(bufferLimit, bufferSize)
randomFlag := getTestChannelSize()
ringBuffer := New(randomFlag, bufferPool)
assert.Equal(t, randomFlag, cap(ringBuffer.c), "The size of buffered channel should be same with the given number")
assert.Equal(t, randomFlag, cap(ringBuffer.Channel), "The size of buffered channel should be same with the given number")
assert.Equal(t, false, ringBuffer.Empty, "The ringBuffer is not empty")
assert.Equal(t, uint64(0), ringBuffer.count, "The truncated count should be 0")
assert.Equal(t, bufferPool, ringBuffer.pool, "The value of bufferpool should be same with the given value")
}
func TestRingBufferCloseChannel(t *testing.T) {
bufferLimit := 100
bufferSize := 256 * 1024
bufferPool := bufferpool.Init(bufferLimit, bufferSize)
randomFlag := getTestChannelSize()
ringBuffer := New(randomFlag, bufferPool)
ringBuffer.Close()
for i := 0; i < cap(ringBuffer.c); i++ {
v, ok := <-ringBuffer.c
assert.Equal(t, (*tracesegment.TraceSegment)(nil), v, "The value should be nil")
assert.Equal(t, false, ok, "The value should be false if the channel is closed")
}
}
func TestRingBufferSend(t *testing.T) {
bufferLimit := 100
bufferSize := 256 * 1024
bufferPool := bufferpool.Init(bufferLimit, bufferSize)
randomFlag := getTestChannelSize()
ringBuffer := New(randomFlag, bufferPool)
segment := tracesegment.GetTestTraceSegment()
for i := 0; i < randomFlag; i++ {
ringBuffer.Send(&segment)
}
for i := 0; i < cap(ringBuffer.c); i++ {
v, ok := <-ringBuffer.c
assert.Equal(t, &segment, v, "The value should be same with the send segment")
assert.Equal(t, true, ok, "The channel is open")
}
}
func TestRingBufferTruncatedCount(t *testing.T) {
log := test.LogSetup()
bufferLimit := 100
bufferSize := 256 * 1024
bufferPool := bufferpool.Init(bufferLimit, bufferSize)
segment := tracesegment.GetTestTraceSegment()
randomFlag := getTestChannelSize()
ringBuffer := New(randomFlag, bufferPool)
extraSegments := 100
for i := 0; i < randomFlag+extraSegments; i++ {
ringBuffer.Send(&segment)
}
num := ringBuffer.TruncatedCount()
assert.Equal(t, num, uint64(extraSegments), "The truncated count should be same with the extra segments sent")
for i := 0; i < extraSegments; i++ {
assert.True(t, strings.Contains(log.Logs[i], "Segment buffer is full. Dropping oldest segment document."))
}
}
func TestRingBufferSendTruncated(t *testing.T) {
log := test.LogSetup()
bufferLimit := 100
bufferSize := 256 * 1024
bufferPool := bufferpool.Init(bufferLimit, bufferSize)
randomFlag := getTestChannelSize() + 2
ringBuffer := New(randomFlag, bufferPool)
var segment []tracesegment.TraceSegment
for i := 0; i < randomFlag; i++ {
segment = append(segment, tracesegment.GetTestTraceSegment())
ringBuffer.Send(&segment[i])
}
s1 := tracesegment.GetTestTraceSegment()
ringBuffer.Send(&s1)
assert.Equal(t, &segment[1], <-ringBuffer.c, "Truncate the first segment in the original buffered channel")
assert.Equal(t, randomFlag, cap(ringBuffer.c), "The buffered channel still full after truncating")
assert.True(t, strings.Contains(log.Logs[0], "Segment buffer is full. Dropping oldest segment document."))
s2 := tracesegment.GetTestTraceSegment()
ringBuffer.Send(&s2)
assert.Equal(t, &segment[2], <-ringBuffer.c, "Truncate the second segment that in the original buffered channel")
assert.Equal(t, randomFlag, cap(ringBuffer.c), "The buffered channel still full after truncating")
assert.True(t, strings.Contains(log.Logs[0], "Segment buffer is full. Dropping oldest segment document."))
}
// getTestChannelSize returns a random number greater than or equal to defaultCapacity
func getTestChannelSize() int {
return rand.Intn(50) + defaultCapacity
}
| 168 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package socketconn
// SocketConn is an interface for socket connection.
type SocketConn interface {
	// Read reads a packet from the connection, copying the payload into b. It returns the number of bytes copied.
Read(b []byte) (int, error)
// Closes the connection.
Close()
}
| 20 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package udp
import (
"net"
"os"
"github.com/aws/aws-xray-daemon/pkg/socketconn"
log "github.com/cihub/seelog"
)
// UDP defines UDP socket connection.
type UDP struct {
socket *net.UDPConn
}
// New returns new instance of UDP.
func New(udpAddress string) socketconn.SocketConn {
log.Debugf("Listening on UDP %v", udpAddress)
addr, err := net.ResolveUDPAddr("udp", udpAddress)
if err != nil {
log.Errorf("%v", err)
os.Exit(1)
}
sock, err := net.ListenUDP("udp", addr)
if err != nil {
log.Errorf("%v", err)
os.Exit(1)
}
return UDP{
socket: sock,
}
}
// Read reads a packet from the UDP connection into b and returns the number of bytes read.
func (conn UDP) Read(b []byte) (int, error) {
rlen, _, err := conn.socket.ReadFromUDP(b)
return rlen, err
}
// Close closes current UDP connection.
func (conn UDP) Close() {
err := conn.socket.Close()
if err != nil {
log.Errorf("unable to close the UDP connection: %v", err)
}
}
| 56 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package telemetry
import (
"os"
"sync/atomic"
"time"
"unsafe"
"github.com/aws/aws-xray-daemon/pkg/conn"
"github.com/aws/aws-xray-daemon/pkg/util/timer"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/xray"
log "github.com/cihub/seelog"
)
const dataCutoffIntervalSecs = 60
const bufferSize = 30
const requestSize = 10
// T is instance of Telemetry.
var T *Telemetry
// Telemetry is used to record X-Ray daemon health.
type Telemetry struct {
// Instance of XRay.
client conn.XRay
timer timer.Timer
// Amazon Resource Name (ARN) of the AWS resource running the daemon.
resourceARN string
// Instance id of the EC2 instance running X-Ray daemon.
instanceID string
// Host name of the EC2 instance running X-Ray daemon.
hostname string
	// Telemetry record currently being accumulated.
	currentRecord *xray.TelemetryRecord
// Timer channel.
timerChan <-chan time.Time
// Boolean channel, set to true when Quit channel is set to true.
Done chan bool
	// Boolean channel, set to true when the daemon is closed.
Quit chan bool
// Channel of TelemetryRecord used to send to X-Ray service.
recordChan chan *xray.TelemetryRecord
	// When a segment is received, postTelemetry is set to true,
	// indicating that telemetry data should be sent for the received segments.
postTelemetry bool
}
// Init instantiates a new instance of Telemetry.
func Init(awsConfig *aws.Config, s *session.Session, resourceARN string, noMetadata bool) {
T = newT(awsConfig, s, resourceARN, noMetadata)
log.Debug("Telemetry initiated")
}
// EvaluateConnectionError processes error with respect to request failure status code.
func EvaluateConnectionError(err error) {
requestFailure, ok := err.(awserr.RequestFailure)
if ok {
statusCode := requestFailure.StatusCode()
if statusCode >= 500 && statusCode < 600 {
T.Connection5xx(1)
} else if statusCode >= 400 && statusCode < 500 {
T.Connection4xx(1)
} else {
T.ConnectionOther(1)
}
} else {
if conn.IsTimeoutError(err) {
T.ConnectionTimeout(1)
} else {
awsError, ok := err.(awserr.Error)
if ok {
if awsError.Code() == "RequestError" {
T.ConnectionUnknownHost(1)
}
} else {
T.ConnectionOther(1)
}
}
}
}
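// Summary of the classification above (added commentary):
//
//	awserr.RequestFailure, 5xx status     -> Connection5xx
//	awserr.RequestFailure, 4xx status     -> Connection4xx
//	awserr.RequestFailure, other status   -> ConnectionOther
//	timeout per conn.IsTimeoutError       -> ConnectionTimeout
//	awserr.Error with code "RequestError" -> ConnectionUnknownHost
//	awserr.Error with any other code      -> not counted
//	any other error                       -> ConnectionOther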
// GetTestTelemetry returns an empty telemetry record.
func GetTestTelemetry() *Telemetry {
return &Telemetry{
currentRecord: getEmptyTelemetryRecord(),
}
}
// SegmentReceived increments SegmentsReceivedCount for the Telemetry record.
func (t *Telemetry) SegmentReceived(count int64) {
atomic.AddInt64(t.currentRecord.SegmentsReceivedCount, count)
	// Send telemetry data only once we have received at least one segment; otherwise skip it.
t.postTelemetry = true
}
// SegmentSent increments SegmentsSentCount for the Telemetry record.
func (t *Telemetry) SegmentSent(count int64) {
atomic.AddInt64(t.currentRecord.SegmentsSentCount, count)
}
// SegmentSpillover increments SegmentsSpilloverCount for the Telemetry record.
func (t *Telemetry) SegmentSpillover(count int64) {
atomic.AddInt64(t.currentRecord.SegmentsSpilloverCount, count)
}
// SegmentRejected increments SegmentsRejectedCount for the Telemetry record.
func (t *Telemetry) SegmentRejected(count int64) {
atomic.AddInt64(t.currentRecord.SegmentsRejectedCount, count)
}
// ConnectionTimeout increments TimeoutCount for the Telemetry record.
func (t *Telemetry) ConnectionTimeout(count int64) {
atomic.AddInt64(t.currentRecord.BackendConnectionErrors.TimeoutCount, count)
}
// ConnectionRefusal increments ConnectionRefusedCount for the Telemetry record.
func (t *Telemetry) ConnectionRefusal(count int64) {
atomic.AddInt64(t.currentRecord.BackendConnectionErrors.ConnectionRefusedCount, count)
}
// Connection4xx increments HTTPCode4XXCount for the Telemetry record.
func (t *Telemetry) Connection4xx(count int64) {
atomic.AddInt64(t.currentRecord.BackendConnectionErrors.HTTPCode4XXCount, count)
}
// Connection5xx increments HTTPCode5XXCount count for the Telemetry record.
func (t *Telemetry) Connection5xx(count int64) {
atomic.AddInt64(t.currentRecord.BackendConnectionErrors.HTTPCode5XXCount, count)
}
// ConnectionUnknownHost increments unknown host BackendConnectionErrors count for the Telemetry record.
func (t *Telemetry) ConnectionUnknownHost(count int64) {
atomic.AddInt64(t.currentRecord.BackendConnectionErrors.UnknownHostCount, count)
}
// ConnectionOther increments other BackendConnectionErrors count for the Telemetry record.
func (t *Telemetry) ConnectionOther(count int64) {
atomic.AddInt64(t.currentRecord.BackendConnectionErrors.OtherCount, count)
}
func newT(awsConfig *aws.Config, s *session.Session, resourceARN string, noMetadata bool) *Telemetry {
timer := &timer.Client{}
hostname := ""
instanceID := ""
var metadataClient *ec2metadata.EC2Metadata
if !noMetadata {
metadataClient = ec2metadata.New(s)
}
hostnameEnv := os.Getenv("AWS_HOSTNAME")
if hostnameEnv != "" {
hostname = hostnameEnv
log.Debugf("Fetch hostname %v from environment variables", hostnameEnv)
} else if metadataClient != nil {
hn, err := metadataClient.GetMetadata("hostname")
if err != nil {
log.Debugf("Get hostname metadata failed: %s", err)
} else {
hostname = hn
log.Debugf("Using %v hostname for telemetry records", hostname)
}
} else {
log.Debug("No hostname set for telemetry records")
}
instanceIDEnv := os.Getenv("AWS_INSTANCE_ID")
if instanceIDEnv != "" {
instanceID = instanceIDEnv
log.Debugf("Fetch instance ID %v from environment variables", instanceIDEnv)
} else if metadataClient != nil {
instID, err := metadataClient.GetMetadata("instance-id")
if err != nil {
log.Errorf("Get instance id metadata failed: %s", err)
} else {
instanceID = instID
log.Debugf("Using %v Instance Id for Telemetry records", instanceID)
}
} else {
log.Debug("No Instance Id set for telemetry records")
}
record := getEmptyTelemetryRecord()
t := &Telemetry{
timer: timer,
resourceARN: resourceARN,
instanceID: instanceID,
hostname: hostname,
currentRecord: record,
timerChan: getDataCutoffDelay(timer),
Done: make(chan bool),
Quit: make(chan bool),
recordChan: make(chan *xray.TelemetryRecord, bufferSize),
postTelemetry: false,
}
telemetryClient := conn.NewXRay(awsConfig, s)
t.client = telemetryClient
go t.pushData()
return t
}
func getZeroInt64() *int64 {
var zero int64
zero = 0
return &zero
}
func getEmptyTelemetryRecord() *xray.TelemetryRecord {
return &xray.TelemetryRecord{
SegmentsReceivedCount: getZeroInt64(),
SegmentsRejectedCount: getZeroInt64(),
SegmentsSentCount: getZeroInt64(),
SegmentsSpilloverCount: getZeroInt64(),
BackendConnectionErrors: &xray.BackendConnectionErrors{
HTTPCode4XXCount: getZeroInt64(),
HTTPCode5XXCount: getZeroInt64(),
ConnectionRefusedCount: getZeroInt64(),
OtherCount: getZeroInt64(),
TimeoutCount: getZeroInt64(),
UnknownHostCount: getZeroInt64(),
},
}
}
func (t *Telemetry) pushData() {
for {
quit := false
select {
case <-t.Quit:
quit = true
break
case <-t.timerChan:
}
emptyRecord := getEmptyTelemetryRecord()
recordToReport := unsafe.Pointer(emptyRecord)
recordToPushPointer := unsafe.Pointer(t.currentRecord)
		// Rotation logic: swap in a fresh empty record as the current record and report the
		// record that has accumulated counts since the last cutoff.
t.currentRecord = (*xray.TelemetryRecord)(atomic.SwapPointer(&recordToReport,
recordToPushPointer))
currentTime := time.Now()
record := (*xray.TelemetryRecord)(recordToReport)
		record.Timestamp = &currentTime
t.add(record)
t.sendAll()
if quit {
close(t.recordChan)
log.Debug("telemetry: done!")
t.Done <- true
break
} else {
t.timerChan = getDataCutoffDelay(t.timer)
}
}
}
func (t *Telemetry) add(record *xray.TelemetryRecord) {
	// Only queue telemetry data once the first segment has been received; otherwise skip it.
if t.postTelemetry {
select {
case t.recordChan <- record:
default:
select {
case <-t.recordChan:
log.Debug("Telemetry Buffers truncated")
t.add(record)
default:
log.Debug("Telemetry Buffers dequeued")
}
}
} else {
log.Debug("Skipped telemetry data as no segments found")
}
}
func (t *Telemetry) sendAll() {
records := t.collectAllRecords()
recordsNoSend, err := t.sendRecords(records)
if err != nil {
log.Debugf("Failed to send telemetry %v record(s). Re-queue records. %v", len(records), err)
		// New records could in principle arrive while the failed records are re-queued, but
		// since the timer is reset only after records are sent, this does not happen.
for _, record := range recordsNoSend {
t.add(record)
}
}
}
func (t *Telemetry) collectAllRecords() []*xray.TelemetryRecord {
records := make([]*xray.TelemetryRecord, bufferSize)
records = records[:0]
var record *xray.TelemetryRecord
done := false
for !done {
select {
case record = <-t.recordChan:
recordLen := len(records)
if recordLen < bufferSize {
records = append(records, record)
}
default:
done = true
}
}
return records
}
func (t *Telemetry) sendRecords(records []*xray.TelemetryRecord) ([]*xray.TelemetryRecord, error) {
if len(records) > 0 {
for i := 0; i < len(records); i = i + requestSize {
endIndex := len(records)
if endIndex > i+requestSize {
endIndex = i + requestSize
}
recordsToSend := records[i:endIndex]
input := xray.PutTelemetryRecordsInput{
EC2InstanceId: &t.instanceID,
Hostname: &t.hostname,
ResourceARN: &t.resourceARN,
TelemetryRecords: recordsToSend,
}
_, err := t.client.PutTelemetryRecords(&input)
if err != nil {
EvaluateConnectionError(err)
return records[i:], err
}
}
log.Debugf("Send %v telemetry record(s)", len(records))
}
return nil, nil
}
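// Worked example (illustrative): with requestSize = 10, a slice of 25 records is sent in
// chunks records[0:10], records[10:20], records[20:25]. If the second call fails, the
// remaining records[10:] are returned so sendAll can re-queue them for the next interval.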
func getDataCutoffDelay(timer timer.Timer) <-chan time.Time {
return timer.After(time.Duration(time.Second * dataCutoffIntervalSecs))
}
| 359 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package telemetry
import (
"errors"
"fmt"
"strings"
"testing"
"github.com/aws/aws-xray-daemon/pkg/util/test"
"github.com/aws/aws-sdk-go/service/xray"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
type MockXRayClient struct {
mock.Mock
CallNoToPutTelemetryRecords int
}
func (c *MockXRayClient) PutTraceSegments(input *xray.PutTraceSegmentsInput) (*xray.PutTraceSegmentsOutput, error) {
return nil, nil
}
func (c *MockXRayClient) PutTelemetryRecords(input *xray.PutTelemetryRecordsInput) (*xray.PutTelemetryRecordsOutput, error) {
c.CallNoToPutTelemetryRecords++
args := c.Called(nil)
errorStr := args.String(0)
var err error
output := &xray.PutTelemetryRecordsOutput{}
if errorStr != "" {
err = errors.New(errorStr)
}
return output, err
}
func TestGetEmptyTelemetryRecord(t *testing.T) {
emptyRecord := getEmptyTelemetryRecord()
assert.EqualValues(t, emptyRecord.SegmentsReceivedCount, new(int64))
assert.EqualValues(t, emptyRecord.SegmentsRejectedCount, new(int64))
assert.EqualValues(t, emptyRecord.SegmentsSentCount, new(int64))
assert.EqualValues(t, emptyRecord.SegmentsSpilloverCount, new(int64))
assert.EqualValues(t, emptyRecord.BackendConnectionErrors.ConnectionRefusedCount, new(int64))
assert.EqualValues(t, emptyRecord.BackendConnectionErrors.HTTPCode4XXCount, new(int64))
assert.EqualValues(t, emptyRecord.BackendConnectionErrors.HTTPCode5XXCount, new(int64))
assert.EqualValues(t, emptyRecord.BackendConnectionErrors.OtherCount, new(int64))
assert.EqualValues(t, emptyRecord.BackendConnectionErrors.TimeoutCount, new(int64))
assert.EqualValues(t, emptyRecord.BackendConnectionErrors.UnknownHostCount, new(int64))
}
func TestAddTelemetryRecord(t *testing.T) {
log := test.LogSetup()
timer := &test.MockTimerClient{}
telemetry := &Telemetry{
client: &MockXRayClient{},
timer: timer,
resourceARN: "",
instanceID: "",
hostname: "",
currentRecord: getEmptyTelemetryRecord(),
timerChan: getDataCutoffDelay(timer),
Done: make(chan bool),
Quit: make(chan bool),
recordChan: make(chan *xray.TelemetryRecord, 1),
postTelemetry: true,
}
telemetry.add(getEmptyTelemetryRecord())
telemetry.add(getEmptyTelemetryRecord())
assert.True(t, strings.Contains(log.Logs[0], "Telemetry Buffers truncated"))
}
func TestSendRecordSuccess(t *testing.T) {
log := test.LogSetup()
xRay := new(MockXRayClient)
xRay.On("PutTelemetryRecords", nil).Return("").Once()
timer := &test.MockTimerClient{}
telemetry := &Telemetry{
client: xRay,
timer: timer,
resourceARN: "",
instanceID: "",
hostname: "",
currentRecord: getEmptyTelemetryRecord(),
timerChan: getDataCutoffDelay(timer),
Done: make(chan bool),
Quit: make(chan bool),
recordChan: make(chan *xray.TelemetryRecord, 1),
}
records := make([]*xray.TelemetryRecord, 1)
records[0] = getEmptyTelemetryRecord()
telemetry.sendRecords(records)
assert.EqualValues(t, xRay.CallNoToPutTelemetryRecords, 1)
assert.True(t, strings.Contains(log.Logs[0], fmt.Sprintf("Send %v telemetry record(s)", 1)))
}
func TestAddRecordWithPostSegmentFalse(t *testing.T) {
log := test.LogSetup()
timer := &test.MockTimerClient{}
telemetry := &Telemetry{
client: &MockXRayClient{},
timer: timer,
resourceARN: "",
instanceID: "",
hostname: "",
currentRecord: getEmptyTelemetryRecord(),
timerChan: getDataCutoffDelay(timer),
Done: make(chan bool),
Quit: make(chan bool),
recordChan: make(chan *xray.TelemetryRecord, 1),
}
telemetry.add(getEmptyTelemetryRecord())
assert.True(t, strings.Contains(log.Logs[0], "Skipped telemetry data as no segments found"))
}
func TestAddRecordBeforeFirstSegmentAndAfter(t *testing.T) {
log := test.LogSetup()
timer := &test.MockTimerClient{}
telemetry := &Telemetry{
client: &MockXRayClient{},
timer: timer,
resourceARN: "",
instanceID: "",
hostname: "",
currentRecord: getEmptyTelemetryRecord(),
timerChan: getDataCutoffDelay(timer),
Done: make(chan bool),
Quit: make(chan bool),
recordChan: make(chan *xray.TelemetryRecord, 1),
}
// No Segment received
telemetry.add(getEmptyTelemetryRecord())
assert.True(t, strings.Contains(log.Logs[0], "Skipped telemetry data as no segments found"))
// Segment received
telemetry.SegmentReceived(1)
telemetry.add(getEmptyTelemetryRecord())
telemetry.add(getEmptyTelemetryRecord())
assert.True(t, strings.Contains(log.Logs[1], "Telemetry Buffers truncated"))
}
| 157 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package tracesegment
import (
"bytes"
"compress/zlib"
log "github.com/cihub/seelog"
"strings"
)
// Header stores header of trace segment.
type Header struct {
Format string `json:"format"`
Version int `json:"version"`
}
// IsValid validates Header.
func (t Header) IsValid() bool {
return strings.EqualFold(t.Format, "json") && t.Version == 1
}
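// Example headers (illustrative): the format check is case-insensitive, so both of the
// following validate, while any other format or version is rejected:
//
//	{"format": "json", "version": 1}
//	{"format": "jSoN", "version": 1}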
// TraceSegment stores raw segment.
type TraceSegment struct {
Raw *[]byte
PoolBuf *[]byte
}
// Deflate compresses the raw segment bytes with zlib and returns the compressed output.
func (r *TraceSegment) Deflate() []byte {
var b bytes.Buffer
w := zlib.NewWriter(&b)
rawBytes := *r.Raw
_, err := w.Write(rawBytes)
if err != nil {
log.Errorf("%v", err)
}
err = w.Close()
if err != nil {
log.Errorf("%v", err)
}
return b.Bytes()
}
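// Round-trip sketch (illustrative): the compressed bytes can be recovered with compress/zlib,
// as the accompanying test does:
//
//	r, _ := zlib.NewReader(bytes.NewReader(seg.Deflate()))
//	original, _ := ioutil.ReadAll(r) // equals *seg.Raw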
| 55 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package tracesegment
import (
"bytes"
"compress/zlib"
"io"
"testing"
"github.com/stretchr/testify/assert"
)
func TestDeflateWithValidInput(t *testing.T) {
testSegment := GetTestTraceSegment()
deflatedBytes := testSegment.Deflate()
rawBytes := *testSegment.Raw
assert.True(t, len(rawBytes) > len(deflatedBytes), "Deflated bytes should compress raw bytes")
// Testing reverting compression using zlib
deflatedBytesBuffer := bytes.NewBuffer(deflatedBytes)
reader, err := zlib.NewReader(deflatedBytesBuffer)
if err != nil {
panic(err)
}
var deflatedBytesRecovered = make([]byte, 1000)
n, err := reader.Read(deflatedBytesRecovered)
if err != nil && err != io.EOF {
panic(err)
}
deflatedBytesRecovered = deflatedBytesRecovered[:n]
assert.Equal(t, n, len(deflatedBytesRecovered))
assert.Equal(t, len(deflatedBytesRecovered), len(rawBytes))
for index, byteVal := range rawBytes {
assert.Equal(t, byteVal, deflatedBytesRecovered[index], "Difference in recovered and original bytes")
}
}
func TestTraceSegmentHeaderIsValid(t *testing.T) {
header := Header{
Format: "json",
Version: 1,
}
valid := header.IsValid()
assert.True(t, valid)
}
func TestTraceSegmentHeaderIsValidCaseInsensitive(t *testing.T) {
header := Header{
Format: "jSoN",
Version: 1,
}
valid := header.IsValid()
assert.True(t, valid)
}
func TestTraceSegmentHeaderIsValidWrongVersion(t *testing.T) {
header := Header{
Format: "json",
Version: 2,
}
valid := header.IsValid()
assert.False(t, valid)
}
func TestTraceSegmentHeaderIsValidWrongFormat(t *testing.T) {
header := Header{
Format: "xml",
Version: 1,
}
valid := header.IsValid()
assert.False(t, valid)
}
func TestTraceSegmentHeaderIsValidWrongFormatVersion(t *testing.T) {
header := Header{
Format: "xml",
Version: 2,
}
valid := header.IsValid()
assert.False(t, valid)
}
| 103 |
aws-xray-daemon | aws | Go | // Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
package tracesegment
import (
"fmt"
"math/rand"
)
// GetTestTraceSegment returns a new TraceSegment instance used for testing.
func GetTestTraceSegment() TraceSegment {
traceRandomNumber := rand.Int()
segmentRandomNumber := rand.Int()
message := fmt.Sprintf("{\"trace_id\": \"%v\", \"id\": \"%v\", \"start_time\": 1461096053.37518, "+
"\"end_time\": 1461096053.4042, "+
"\"name\": \"hello-1.mbfzqxzcpe.us-east-1.elasticbeanstalk.com\"}",
traceRandomNumber,
segmentRandomNumber)
buf := make([]byte, 100)
messageBytes := []byte(message)
segment := TraceSegment{
PoolBuf: &buf,
Raw: &messageBytes,
}
return segment
}
| 35 |
aws-xray-daemon | aws | Go | package util
import (
"bytes"
log "github.com/cihub/seelog"
)
// SplitHeaderBody splits buf into a header and a body at the first occurrence of sep and stores the two slices in returnByte.
func SplitHeaderBody(buf, sep *[]byte, returnByte *[][]byte) [][]byte {
if buf == nil {
log.Error("Buf to split passed nil")
return nil
}
if sep == nil {
log.Error("Separator used to split passed nil")
return nil
}
if returnByte == nil {
log.Error("Return Buf to be used to store split passed nil")
return nil
}
separator := *sep
bufVal := *buf
lenSeparator := len(separator)
var header, body []byte
header = *buf
	// Scan for the first occurrence of the separator; the loop bound guards against
	// slicing past the end of the buffer when no separator is present.
	for i := 0; i+lenSeparator <= len(bufVal); i++ {
		if bytes.Equal(bufVal[i:i+lenSeparator], separator) {
			header = bufVal[0:i]
			body = bufVal[i+lenSeparator:]
			break
		}
		if i+lenSeparator == len(bufVal) {
			log.Warnf("Missing header: %s", header)
		}
	}
returnByteVal := *returnByte
return append(returnByteVal[:0], header, body)
}
// GetMinIntValue returns the minimum of a and b.
func GetMinIntValue(a, b int) int {
if a < b {
return a
}
return b
}
// Bool returns a pointer to the input parameter.
func Bool(b bool) *bool {
return &b
}
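// exampleSplitHeaderBody is an illustrative sketch added for documentation only; it is not used
// by the daemon. It shows the calling convention for SplitHeaderBody: the caller supplies the
// buffer, the separator and a reusable 2-element result slice, and the returned slices alias the
// input buffer. The JSON literals are assumed sample data.
func exampleSplitHeaderBody() ([]byte, []byte) {
	packet := []byte("{\"format\": \"json\", \"version\": 1}\n{\"name\": \"service\"}")
	separator := []byte("\n")
	result := make([][]byte, 2)
	parts := SplitHeaderBody(&packet, &separator, &result)
	if parts == nil || len(parts) != 2 {
		log.Error("unexpected split result")
		return nil, nil
	}
	// parts[0] holds the header JSON, parts[1] the segment body.
	return parts[0], parts[1]
}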
| 55 |
aws-xray-daemon | aws | Go | package util
import (
"strings"
"testing"
"github.com/aws/aws-xray-daemon/pkg/util/test"
"github.com/stretchr/testify/assert"
)
func TestSplitHeaderBodyWithSeparatorExists(t *testing.T) {
str := "Header\nBody"
separator := "\n"
buf := []byte(str)
separatorArray := []byte(separator)
result := make([][]byte, 2)
returnResult := SplitHeaderBody(&buf, &separatorArray, &result)
assert.EqualValues(t, len(result), 2)
assert.EqualValues(t, string(result[0]), "Header")
assert.EqualValues(t, string(result[1]), "Body")
assert.EqualValues(t, string(returnResult[0]), "Header")
assert.EqualValues(t, string(returnResult[1]), "Body")
assert.EqualValues(t, string(buf), str)
assert.EqualValues(t, string(separatorArray), separator)
}
func TestSplitHeaderBodyWithSeparatorDoesNotExist(t *testing.T) {
str := "Header"
separator := "\n"
buf := []byte(str)
separatorArray := []byte(separator)
result := make([][]byte, 2)
returnResult := SplitHeaderBody(&buf, &separatorArray, &result)
assert.EqualValues(t, len(result), 2)
assert.EqualValues(t, string(result[0]), "Header")
assert.EqualValues(t, string(result[1]), "")
assert.EqualValues(t, string(returnResult[0]), "Header")
assert.EqualValues(t, string(returnResult[1]), "")
assert.EqualValues(t, string(buf), str)
assert.EqualValues(t, string(separatorArray), separator)
}
func TestSplitHeaderBodyNilBuf(t *testing.T) {
log := test.LogSetup()
separator := "\n"
separatorArray := []byte(separator)
result := make([][]byte, 2)
SplitHeaderBody(nil, &separatorArray, &result)
assert.True(t, strings.Contains(log.Logs[0], "Buf to split passed nil"))
}
func TestSplitHeaderBodyNilSeparator(t *testing.T) {
log := test.LogSetup()
str := "Test String"
buf := []byte(str)
result := make([][]byte, 2)
SplitHeaderBody(&buf, nil, &result)
assert.True(t, strings.Contains(log.Logs[0], "Separator used to split passed nil"))
}
func TestSplitHeaderBodyNilResult(t *testing.T) {
log := test.LogSetup()
str := "Test String"
buf := []byte(str)
separator := "\n"
separatorArray := []byte(separator)
SplitHeaderBody(&buf, &separatorArray, nil)
assert.True(t, strings.Contains(log.Logs[0], "Return Buf to be used to store split passed nil"))
}
func TestGetMinIntValue(t *testing.T) {
assert.Equal(t, GetMinIntValue(1, 1), 1, "Return value should be 1")
assert.Equal(t, GetMinIntValue(0, 1), 0, "Return value should be 0")
assert.Equal(t, GetMinIntValue(1, 0), 0, "Return value should be 0")
}
| 84 |
aws-xray-daemon | aws | Go | package test
import (
log "github.com/cihub/seelog"
)
// LogWriter is an in-memory log writer that captures log output for tests.
type LogWriter struct {
Logs []string
}
// Write appends the contents of p to the captured logs.
func (sw *LogWriter) Write(p []byte) (n int, err error) {
sw.Logs = append(sw.Logs, string(p))
return len(p), nil
}
// LogSetup replaces the global seelog logger with an in-memory LogWriter and returns the writer.
func LogSetup() *LogWriter {
writer := &LogWriter{}
logger, err := log.LoggerFromWriterWithMinLevelAndFormat(writer, log.TraceLvl, "%Ns [%Level] %Msg")
if err != nil {
panic(err)
}
log.ReplaceLogger(logger)
return writer
}
| 28 |
aws-xray-daemon | aws | Go | package test
import (
"sync"
"sync/atomic"
"time"
)
type timer struct {
start time.Time
duration time.Duration
repeat bool
fired bool
c chan time.Time
}
// MockTimerClient is a mock timer client that lets tests control the passage of time.
type MockTimerClient struct {
sync.RWMutex
current time.Time
timers []*timer
afterCalled uint64
tickCalled uint64
}
func (m *MockTimerClient) newTimer(d time.Duration, repeat bool) *timer {
m.RLock()
t := &timer{
start: m.current,
duration: d,
repeat: repeat,
fired: false,
c: make(chan time.Time, 1),
}
m.RUnlock()
m.Lock()
defer m.Unlock()
m.timers = append(m.timers, t)
return t
}
// After is a mock of time.After().
func (m *MockTimerClient) After(d time.Duration) <-chan time.Time {
atomic.AddUint64(&m.afterCalled, 1)
return m.newTimer(d, false).c
}
// Tick is a mock of time.Tick().
func (m *MockTimerClient) Tick(d time.Duration) <-chan time.Time {
atomic.AddUint64(&m.tickCalled, 1)
return m.newTimer(d, true).c
}
// Advance simulates the passage of time and signals timers and tickers accordingly.
func (m *MockTimerClient) Advance(d time.Duration) {
m.Lock()
m.current = m.current.Add(d)
m.Unlock()
m.RLock()
defer m.RUnlock()
curr := m.current
for _, t := range m.timers {
if t.repeat {
			// for Tickers, calculate how many ticks have passed and signal accordingly
for i := int64(0); i < int64(curr.Sub(t.start)/t.duration); i++ {
t.c <- t.start.Add(t.duration * time.Duration(i+1))
t.start = t.start.Add(t.duration)
}
} else {
// for Afters (one-off), signal once
if !t.fired && (curr.Sub(t.start) >= t.duration) {
t.c <- t.start.Add(t.duration)
t.fired = true
}
}
}
}
// AfterCalledTimes returns the number of times After has been called.
func (m *MockTimerClient) AfterCalledTimes() uint64 {
return atomic.LoadUint64(&m.afterCalled)
}
// TickCalledTimes returns the number of times Tick has been called.
func (m *MockTimerClient) TickCalledTimes() uint64 {
return atomic.LoadUint64(&m.tickCalled)
}
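// exampleAdvance is an illustrative sketch added for documentation only; it is not used by the
// tests. It shows how MockTimerClient lets a test observe ticks deterministically: obtain a
// channel via Tick, then call Advance to simulate elapsed time. Each Advance below crosses
// exactly one interval, so each tick fits in the channel's single-element buffer without blocking.
func exampleAdvance() {
	mock := &MockTimerClient{current: time.Unix(0, 0)}
	tick := mock.Tick(10 * time.Second)

	mock.Advance(10 * time.Second) // one interval elapses; one tick is buffered
	<-tick                         // observe the first tick

	mock.Advance(10 * time.Second) // the next interval elapses
	<-tick                         // observe the second tick
}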
| 96 |
aws-xray-daemon | aws | Go | package test
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
type EmptyStruct struct {
}
func ChannelHasData(c chan EmptyStruct) bool {
var ok bool
select {
case <-c:
ok = true
default:
ok = false
}
return ok
}
// TryToGetValue reads from ch with a timeout so that test cases do not hang if the channel never receives a value.
func TryToGetValue(ch chan EmptyStruct) *EmptyStruct {
timeout := make(chan bool, 1)
go func() {
time.Sleep(100 * time.Millisecond)
timeout <- true
}()
select {
case v := <-ch:
return &v
case <-timeout:
return nil
}
}
func TickTestHelper(tickDuration int64, t *testing.T) {
timer := &MockTimerClient{current: time.Unix(35534432431, 0)}
tickChan := make(chan EmptyStruct, 1)
tickFunc := func() {
// Go routine started
tickChan <- EmptyStruct{}
t := timer.Tick(time.Duration(tickDuration))
for {
<-t
tickChan <- EmptyStruct{}
}
}
go tickFunc()
// Go routine to monitor tick started
<-tickChan
testCasesTicksToTrigger := []int{1, 2, 1000}
var durationIncremented int64
for _, ticksToTrigger := range testCasesTicksToTrigger {
for i := 0; i < ticksToTrigger; i++ {
var ok bool
ok = ChannelHasData(tickChan)
assert.False(t, ok)
initialIncrement := tickDuration / 2
// Not enough to trigger tick
timer.Advance(time.Duration(initialIncrement))
durationIncremented += initialIncrement
ok = ChannelHasData(tickChan)
assert.False(t, ok)
// tick triggered
timer.Advance(time.Duration(tickDuration))
durationIncremented += tickDuration
val := TryToGetValue(tickChan)
assert.NotNil(t,
val,
fmt.Sprintf("Expected value passed thru the channel. Tick Duration: %v, Tick Trigger Iteration: %v, Ticket To Trigger: %v Current Clock Time: %v",
tickDuration,
i,
ticksToTrigger,
timer.current))
			// Advance by a quarter of the duration; not enough to trigger another tick
durationForth := tickDuration / 4
timer.Advance(time.Duration(durationForth))
durationIncremented += durationForth
ok = ChannelHasData(tickChan)
assert.False(t, ok)
			// Advance to an exact multiple of the tick duration so that the next loop
			// iteration can assume the accumulated increment is zero
finalIncrement := tickDuration*2 - durationIncremented
// tick triggered
timer.Advance(time.Duration(finalIncrement))
val = TryToGetValue(tickChan)
assert.NotNil(t, val)
durationIncremented = 0
}
}
assert.EqualValues(t, 1, timer.TickCalledTimes())
}
func TestTickDuration454(t *testing.T) {
var tickDuration int64
tickDuration = 454
TickTestHelper(tickDuration, t)
}
func TestAfter(t *testing.T) {
var afterDuration int64
afterDuration = 10
timer := MockTimerClient{current: time.Unix(2153567564, 0)}
afterChan := make(chan EmptyStruct, 1)
tickFunc := func() {
// Go routine started
afterChan <- EmptyStruct{}
t := timer.After(time.Duration(afterDuration))
for {
<-t
afterChan <- EmptyStruct{}
}
}
go tickFunc()
// Go routine started to monitor after messages
<-afterChan
var ok bool
ok = ChannelHasData(afterChan)
assert.False(t, ok)
initialIncrement := afterDuration / 2
// Not enough to trigger after
timer.Advance(time.Duration(initialIncrement))
ok = ChannelHasData(afterChan)
assert.False(t, ok)
// after triggered
timer.Advance(time.Duration(afterDuration))
val := TryToGetValue(afterChan)
assert.NotNil(t, val, fmt.Sprintf("Expected value passed thru the channel. After Duration: %v, Current Clock Time: %v", afterDuration, timer.current))
	// Unlike Tick, After should trigger only once
timer.Advance(time.Duration(afterDuration))
ok = ChannelHasData(afterChan)
assert.False(t, ok)
assert.EqualValues(t, 1, timer.AfterCalledTimes())
}
func TestAfterTickTogether(t *testing.T) {
var tickDuration int64
tickDuration = 10
afterDuration := tickDuration * 2
timer := MockTimerClient{current: time.Unix(23082153551, 0)}
tickChan := make(chan EmptyStruct, 1)
afterChan := make(chan EmptyStruct, 1)
tickFunc := func() {
// Go routine started
tick := timer.Tick(time.Duration(tickDuration))
tickChan <- EmptyStruct{}
for {
select {
case <-tick:
tickChan <- EmptyStruct{}
}
}
}
afterFunc := func() {
// Go routine started
after := timer.After(time.Duration(afterDuration))
afterChan <- EmptyStruct{}
for {
select {
case <-after:
afterChan <- EmptyStruct{}
}
}
}
go tickFunc()
go afterFunc()
// Go routine started to monitor tick and after events
<-tickChan
<-afterChan
testCasesTicksToTrigger := []int{1, 2, 100}
var durationIncremented int64
for triggerIndex, ticksToTrigger := range testCasesTicksToTrigger {
for i := 0; i < ticksToTrigger; i++ {
var ok bool
ok = ChannelHasData(tickChan)
assert.False(t, ok)
ok = ChannelHasData(afterChan)
assert.False(t, ok)
initialIncrement := tickDuration / 2
// Not enough to trigger tick
timer.Advance(time.Duration(initialIncrement))
durationIncremented += initialIncrement
ok = ChannelHasData(tickChan)
assert.False(t, ok)
ok = ChannelHasData(afterChan)
assert.False(t, ok)
// tick triggered
timer.Advance(time.Duration(tickDuration))
durationIncremented += tickDuration
val := TryToGetValue(tickChan)
assert.NotNil(t, val)
ok = ChannelHasData(afterChan)
assert.False(t, ok)
			// Advance by a quarter of the duration; not enough to trigger another tick
durationForth := tickDuration / 4
timer.Advance(time.Duration(durationForth))
durationIncremented += durationForth
ok = ChannelHasData(tickChan)
assert.False(t, ok)
ok = ChannelHasData(afterChan)
assert.False(t, ok)
			// Advance to an exact multiple of the tick duration so that the next loop
			// iteration can assume the accumulated increment is zero
finalIncrement := tickDuration*2 - durationIncremented
// tick triggered
timer.Advance(time.Duration(finalIncrement))
			// After only triggers on the first iteration since it fires exactly once
if (triggerIndex == 0) && (i == 0) {
val = TryToGetValue(afterChan)
assert.NotNil(t, val)
} else {
ok = ChannelHasData(afterChan)
assert.False(t, ok)
}
val = TryToGetValue(tickChan)
assert.NotNil(t, val)
durationIncremented = 0
}
}
assert.EqualValues(t, 1, timer.TickCalledTimes())
assert.EqualValues(t, 1, timer.AfterCalledTimes())
}
| 244 |
aws-xray-daemon | aws | Go | package timer
import "time"
// Timer is an interface over time-based signals so that implementations can be swapped in tests.
type Timer interface {
Tick(d time.Duration) <-chan time.Time
After(d time.Duration) <-chan time.Time
}
// Client is the default Timer implementation, backed by the standard time package.
type Client struct{}
// Tick is a wrapper around time.Tick().
func (t *Client) Tick(d time.Duration) <-chan time.Time {
return time.Tick(d)
}
// After is a wrapper around time.After().
func (t *Client) After(d time.Duration) <-chan time.Time {
return time.After(d)
}
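// pollEvery is an illustrative sketch added for documentation only; it is not used by the
// daemon. It shows the point of the Timer interface: code written against Timer runs with
// Client in production and with a mock implementation in unit tests. Typical wiring might
// look like pollEvery(&Client{}, time.Minute, flush) for some hypothetical flush func().
func pollEvery(t Timer, d time.Duration, work func()) {
	for range t.Tick(d) {
		work()
	}
}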
| 23 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package beanstalk
import (
"encoding/json"
"io/ioutil"
"github.com/aws/aws-xray-sdk-go/internal/logger"
"github.com/aws/aws-xray-sdk-go/internal/plugins"
)
// Origin is the type of AWS resource that runs your application.
const Origin = "AWS::ElasticBeanstalk::Environment"
// Init activates ElasticBeanstalkPlugin at runtime.
func Init() {
if plugins.InstancePluginMetadata != nil && plugins.InstancePluginMetadata.BeanstalkMetadata == nil {
addPluginMetadata(plugins.InstancePluginMetadata)
}
}
func addPluginMetadata(pluginmd *plugins.PluginMetadata) {
ebConfigPath := "/var/elasticbeanstalk/xray/environment.conf"
rawConfig, err := ioutil.ReadFile(ebConfigPath)
if err != nil {
logger.Errorf("Unable to read Elastic Beanstalk configuration file %s: %v", ebConfigPath, err)
return
}
config := &plugins.BeanstalkMetadata{}
err = json.Unmarshal(rawConfig, config)
if err != nil {
logger.Errorf("Unable to unmarshal Elastic Beanstalk configuration file %s: %v", ebConfigPath, err)
return
}
pluginmd.BeanstalkMetadata = config
pluginmd.Origin = Origin
}
| 48 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package ec2
import (
"bytes"
"encoding/json"
"net/http"
"github.com/aws/aws-xray-sdk-go/internal/logger"
"github.com/aws/aws-xray-sdk-go/internal/plugins"
)
// Origin is the type of AWS resource that runs your application.
const Origin = "AWS::EC2::Instance"
type metadata struct {
AvailabilityZone string
ImageID string
InstanceID string
InstanceType string
}
// Init activates EC2Plugin at runtime.
func Init() {
if plugins.InstancePluginMetadata != nil && plugins.InstancePluginMetadata.EC2Metadata == nil {
addPluginMetadata(plugins.InstancePluginMetadata)
}
}
func addPluginMetadata(pluginmd *plugins.PluginMetadata) {
var instanceData metadata
imdsURL := "http://169.254.169.254/latest/"
client := &http.Client{
Transport: http.DefaultTransport,
}
token, err := getToken(imdsURL, client)
if err != nil {
logger.Debugf("Unable to fetch EC2 instance metadata token fallback to IMDS V1: %v", err)
}
	resp, err := getMetadata(imdsURL, client, token)
	if err != nil {
		logger.Errorf("Unable to read EC2 instance metadata: %v", err)
		return
	}
	defer resp.Body.Close()
buf := new(bytes.Buffer)
if _, err := buf.ReadFrom(resp.Body); err != nil {
logger.Errorf("Error while reading data from response buffer: %v", err)
return
}
metadata := buf.String()
if err := json.Unmarshal([]byte(metadata), &instanceData); err != nil {
logger.Errorf("Error while unmarshal operation: %v", err)
return
}
pluginmd.EC2Metadata = &plugins.EC2Metadata{InstanceID: instanceData.InstanceID, AvailabilityZone: instanceData.AvailabilityZone}
pluginmd.Origin = Origin
}
// getToken fetches an IMDSv2 token used to request EC2 instance metadata.
func getToken(imdsURL string, client *http.Client) (string, error) {
ttlHeader := "X-aws-ec2-metadata-token-ttl-seconds"
defaultTTL := "60"
tokenURL := imdsURL + "api/token"
req, _ := http.NewRequest("PUT", tokenURL, nil)
req.Header.Add(ttlHeader, defaultTTL)
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
buf := new(bytes.Buffer)
if _, err := buf.ReadFrom(resp.Body); err != nil {
logger.Errorf("Error while reading data from response buffer: %v", err)
return "", err
}
token := buf.String()
return token, err
}
// getMetadata fetches the EC2 instance identity document, sending the IMDSv2 token when available.
func getMetadata(imdsURL string, client *http.Client, token string) (*http.Response, error) {
var metadataHeader string
metadataURL := imdsURL + "dynamic/instance-identity/document"
req, _ := http.NewRequest("GET", metadataURL, nil)
if token != "" {
metadataHeader = "X-aws-ec2-metadata-token"
req.Header.Add(metadataHeader, token)
}
return client.Do(req)
}
// Metdata represents IMDS response.
//
// Deprecated: Metdata exists only for backward compatibility.
type Metdata struct {
AvailabilityZone string
ImageID string
InstanceID string
InstanceType string
}
| 118 |
aws-xray-sdk-go | aws | Go | package ec2
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
)
const testMetadata = `{
"accountId" : "123367104812",
"architecture" : "x86_64",
"availabilityZone" : "us-west-2a",
"billingProducts" : null,
"devpayProductCodes" : null,
"marketplaceProductCodes" : null,
"imageId" : "ami-0fe02940a29f8239b",
"instanceId" : "i-032fe2d42797fb9a1",
"instanceType" : "c5.xlarge",
"kernelId" : null,
"pendingTime" : "2020-04-21T21:16:47Z",
"privateIp" : "172.19.57.109",
"ramdiskId" : null,
"region" : "us-west-2",
"version" : "2017-09-30"
}`
const (
documentPath = "/dynamic/instance-identity/document"
tokenPath = "/api/token"
ec2Endpoint = "http://169.254.169.254/latest"
)
func TestEndpoint(t *testing.T) {
req, _ := http.NewRequest("GET", ec2Endpoint, nil)
if e, a := ec2Endpoint, req.URL.String(); e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestIMDSv2Success(t *testing.T) {
// Start a local HTTP server
serverToken := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
assert.Equal(t, req.URL.String(), tokenPath)
_, _ = rw.Write([]byte("success"))
}))
serverMetadata := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
assert.Equal(t, req.URL.String(), documentPath)
assert.Equal(t, req.Header.Get("X-aws-ec2-metadata-token"), "success")
_, _ = rw.Write([]byte(testMetadata))
}))
defer serverToken.Close()
defer serverMetadata.Close()
client := &http.Client{
Transport: http.DefaultTransport,
}
// token fetch success
respToken, _ := getToken(serverToken.URL+"/", client)
assert.NotEqual(t, respToken, "")
// successfully metadata fetch using IMDS v2
respMetadata, _ := getMetadata(serverMetadata.URL+"/", client, respToken)
ec2Metadata, _ := ioutil.ReadAll(respMetadata.Body)
assert.Equal(t, []byte(testMetadata), ec2Metadata)
}
func TestIMDSv2Failv1Success(t *testing.T) {
// Start a local HTTP server
serverToken := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
assert.Equal(t, req.URL.String(), tokenPath)
_, _ = rw.Write([]byte("success"))
}))
serverMetadata := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
assert.Equal(t, req.URL.String(), documentPath)
_, _ = rw.Write([]byte(testMetadata))
}))
defer serverToken.Close()
defer serverMetadata.Close()
client := &http.Client{
Transport: http.DefaultTransport,
}
// token fetch fail
respToken, _ := getToken("/", client)
assert.Equal(t, respToken, "")
// fallback to IMDSv1 and successfully metadata fetch using IMDSv1
respMetadata, _ := getMetadata(serverMetadata.URL+"/", client, respToken)
ec2Metadata, _ := ioutil.ReadAll(respMetadata.Body)
assert.Equal(t, []byte(testMetadata), ec2Metadata)
}
func TestIMDSv2Failv1Fail(t *testing.T) {
// Start a local HTTP server
serverToken := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
assert.Equal(t, req.URL.String(), tokenPath)
_, _ = rw.Write([]byte("success"))
}))
serverMetadata := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
assert.Equal(t, req.URL.String(), documentPath)
_, _ = rw.Write([]byte(testMetadata))
}))
defer serverToken.Close()
defer serverMetadata.Close()
client := &http.Client{
Transport: http.DefaultTransport,
}
// token fetch fail
respToken, _ := getToken("/", client)
assert.Equal(t, respToken, "")
// fallback to IMDSv1 and fail metadata fetch using IMDSv1
_, err := getMetadata("/", client, respToken)
assert.Error(t, err)
}
| 134 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package ecs
import (
"os"
"github.com/aws/aws-xray-sdk-go/internal/logger"
"github.com/aws/aws-xray-sdk-go/internal/plugins"
)
// Origin is the type of AWS resource that runs your application.
const Origin = "AWS::ECS::Container"
// Init activates ECSPlugin at runtime.
func Init() {
if plugins.InstancePluginMetadata != nil && plugins.InstancePluginMetadata.ECSMetadata == nil {
addPluginMetadata(plugins.InstancePluginMetadata)
}
}
func addPluginMetadata(pluginmd *plugins.PluginMetadata) {
hostname, err := os.Hostname()
if err != nil {
logger.Errorf("Unable to retrieve hostname from OS. %v", err)
return
}
pluginmd.ECSMetadata = &plugins.ECSMetadata{ContainerName: hostname}
pluginmd.Origin = Origin
}
| 39 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package daemoncfg
import (
"net"
"os"
"strconv"
"strings"
"github.com/aws/aws-xray-sdk-go/internal/logger"
"github.com/pkg/errors"
)
var addressDelimiter = " " // delimiter between tcp and udp addresses
var udpKey = "udp"
var tcpKey = "tcp"
// DaemonEndpoints stores the X-Ray daemon's IP address (or hostname) and port for both UDP and TCP.
// The address string is read from the "AWS_XRAY_DAEMON_ADDRESS" environment variable first, and then
// from the recorder's DaemonAddr configuration.
// The notations '127.0.0.1:2000', 'tcp:127.0.0.1:2000 udp:127.0.0.2:2001' and 'udp:127.0.0.1:2000 tcp:127.0.0.2:2001'
// are all acceptable; the first form means UDP and TCP share the same address.
// The notations 'hostname:2000', 'tcp:hostname:2000 udp:hostname:2001' and 'udp:hostname:2000 tcp:hostname:2001' are also acceptable.
// By default it assumes an X-Ray daemon at 127.0.0.1:2000 listening for both UDP and TCP traffic.
type DaemonEndpoints struct {
// UDPAddr represents UDP endpoint for segments to be sent by emitter.
UDPAddr *net.UDPAddr
// TCPAddr represents TCP endpoint of the daemon to make sampling API calls.
TCPAddr *net.TCPAddr
}
// GetDaemonEndpoints returns the daemon endpoints parsed from the environment variable, falling back
// to the defaults when it is not set; it panics if the environment variable is set but invalid.
func GetDaemonEndpoints() *DaemonEndpoints {
daemonEndpoint, err := GetDaemonEndpointsFromString("") // only environment variable would be parsed
if err != nil {
panic(err)
}
if daemonEndpoint == nil { // env variable not set
return GetDefaultDaemonEndpoints()
}
return daemonEndpoint // env variable successfully parsed
}
// GetDaemonEndpointsFromEnv resolves the daemon address if set in the environment variable.
func GetDaemonEndpointsFromEnv() (*DaemonEndpoints, error) {
if envDaemonAddr := os.Getenv("AWS_XRAY_DAEMON_ADDRESS"); envDaemonAddr != "" {
return resolveAddress(envDaemonAddr)
}
return nil, nil
}
// GetDefaultDaemonEndpoints returns the default UDP and TCP address of the daemon.
func GetDefaultDaemonEndpoints() *DaemonEndpoints {
udpAddr := &net.UDPAddr{
IP: net.IPv4(127, 0, 0, 1),
Port: 2000,
}
tcpAddr := &net.TCPAddr{
IP: net.IPv4(127, 0, 0, 1),
Port: 2000,
}
return &DaemonEndpoints{
UDPAddr: udpAddr,
TCPAddr: tcpAddr,
}
}
// GetDaemonEndpointsFromString parses the daemon address, giving the AWS_XRAY_DAEMON_ADDRESS
// environment variable precedence over the provided address.
// The returned DaemonEndpoints is non-nil if the parsed address is valid.
func GetDaemonEndpointsFromString(dAddr string) (*DaemonEndpoints, error) {
var daemonAddr string
// Try to get the X-Ray daemon address from an environment variable
if envDaemonAddr := os.Getenv("AWS_XRAY_DAEMON_ADDRESS"); envDaemonAddr != "" {
daemonAddr = envDaemonAddr
logger.Infof("using daemon endpoints from environment variable AWS_XRAY_DAEMON_ADDRESS: %v", envDaemonAddr)
} else if dAddr != "" {
daemonAddr = dAddr
}
if daemonAddr != "" {
return resolveAddress(daemonAddr)
}
return nil, nil
}
func resolveAddress(dAddr string) (*DaemonEndpoints, error) {
addr := strings.Split(dAddr, addressDelimiter)
switch len(addr) {
case 1:
return parseSingleForm(addr[0])
case 2:
return parseDoubleForm(addr)
}
return nil, errors.New("invalid daemon address: " + dAddr)
}
func parseDoubleForm(addr []string) (*DaemonEndpoints, error) {
addr1 := strings.Split(addr[0], ":") // tcp:127.0.0.1:2000 or udp:127.0.0.1:2000
addr2 := strings.Split(addr[1], ":") // tcp:127.0.0.1:2000 or udp:127.0.0.1:2000
if len(addr1) != 3 || len(addr2) != 3 {
return nil, errors.New("invalid daemon address: " + addr[0] + " " + addr[1])
}
// validate ports
_, pErr1 := strconv.Atoi(addr1[2])
	_, pErr2 := strconv.Atoi(addr2[2])
if pErr1 != nil || pErr2 != nil {
return nil, errors.New("invalid daemon address port")
}
addrMap := make(map[string]string)
addrMap[addr1[0]] = addr1[1] + ":" + addr1[2]
addrMap[addr2[0]] = addr2[1] + ":" + addr2[2]
if addrMap[udpKey] == "" || addrMap[tcpKey] == "" { // for double form, tcp and udp keywords should be present
return nil, errors.New("invalid daemon address")
}
udpAddr, uErr := resolveUDPAddr(addrMap[udpKey])
if uErr != nil {
return nil, uErr
}
tcpAddr, tErr := resolveTCPAddr(addrMap[tcpKey])
if tErr != nil {
return nil, tErr
}
return &DaemonEndpoints{
UDPAddr: udpAddr,
TCPAddr: tcpAddr,
}, nil
}
func parseSingleForm(addr string) (*DaemonEndpoints, error) { // format = "ip:port"
a := strings.Split(addr, ":") // 127.0.0.1:2000
if len(a) != 2 {
return nil, errors.New("invalid daemon address: " + addr)
}
// validate port
_, pErr1 := strconv.Atoi(a[1])
if pErr1 != nil {
return nil, errors.New("invalid daemon address port")
}
udpAddr, uErr := resolveUDPAddr(addr)
if uErr != nil {
return nil, uErr
}
tcpAddr, tErr := resolveTCPAddr(addr)
if tErr != nil {
return nil, tErr
}
return &DaemonEndpoints{
UDPAddr: udpAddr,
TCPAddr: tcpAddr,
}, nil
}
func resolveUDPAddr(s string) (*net.UDPAddr, error) {
return net.ResolveUDPAddr(udpKey, s)
}
func resolveTCPAddr(s string) (*net.TCPAddr, error) {
return net.ResolveTCPAddr(tcpKey, s)
}
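// exampleResolveEndpoints is an illustrative sketch added for documentation only; it is not used
// by the SDK. It shows the two accepted address notations; the addresses below are placeholders.
func exampleResolveEndpoints() {
	// Single form: UDP and TCP share one address.
	if ep, err := GetDaemonEndpointsFromString("127.0.0.1:2000"); err == nil && ep != nil {
		logger.Infof("single form -> UDP %v, TCP %v", ep.UDPAddr, ep.TCPAddr)
	}
	// Double form: separate UDP and TCP addresses, in either order.
	if ep, err := GetDaemonEndpointsFromString("tcp:127.0.0.1:2000 udp:127.0.0.1:2001"); err == nil && ep != nil {
		logger.Infof("double form -> UDP %v, TCP %v", ep.UDPAddr, ep.TCPAddr)
	}
}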
| 183 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package daemoncfg
import (
"fmt"
"os"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
var portErr = "invalid daemon address port"
var addrErr = "invalid daemon address"
func TestGetDaemonEndpoints1(t *testing.T) { // default address set to udp and tcp
udpAddr := "127.0.0.1:2000"
tcpAddr := "127.0.0.1:2000"
udpEndpt, _ := resolveUDPAddr(udpAddr)
tcpEndpt, _ := resolveTCPAddr(tcpAddr)
dEndpt := GetDaemonEndpoints()
assert.Equal(t, dEndpt.UDPAddr, udpEndpt)
assert.Equal(t, dEndpt.TCPAddr, tcpEndpt)
}
func TestGetDaemonEndpoints2(t *testing.T) { // address from env variable used for udp and tcp
udpAddr := "127.0.0.1:4000"
tcpAddr := "127.0.0.1:5000"
udpEndpt, _ := resolveUDPAddr(udpAddr)
tcpEndpt, _ := resolveTCPAddr(tcpAddr)
dAddr := "tcp:" + tcpAddr + " udp:" + udpAddr
os.Setenv("AWS_XRAY_DAEMON_ADDRESS", dAddr) // env variable gets precedence over provided daemon addr
defer os.Unsetenv("AWS_XRAY_DAEMON_ADDRESS")
dEndpt := GetDaemonEndpoints()
assert.Equal(t, dEndpt.UDPAddr, udpEndpt)
assert.Equal(t, dEndpt.TCPAddr, tcpEndpt)
}
func TestGetDaemonEndpointsFromEnv1(t *testing.T) {
udpAddr := "127.0.0.1:4000"
tcpAddr := "127.0.0.1:5000"
udpEndpt, _ := resolveUDPAddr(udpAddr)
tcpEndpt, _ := resolveTCPAddr(tcpAddr)
dAddr := "tcp:" + tcpAddr + " udp:" + udpAddr
os.Setenv("AWS_XRAY_DAEMON_ADDRESS", dAddr)
defer os.Unsetenv("AWS_XRAY_DAEMON_ADDRESS")
dEndpt, _ := GetDaemonEndpointsFromEnv()
assert.Equal(t, dEndpt.UDPAddr, udpEndpt)
assert.Equal(t, dEndpt.TCPAddr, tcpEndpt)
}
func TestGetDaemonEndpointsFromEnv2(t *testing.T) {
os.Setenv("AWS_XRAY_DAEMON_ADDRESS", "")
defer os.Unsetenv("AWS_XRAY_DAEMON_ADDRESS")
dEndpt, err := GetDaemonEndpointsFromEnv()
assert.Nil(t, dEndpt)
assert.Nil(t, err)
}
func TestGetDefaultDaemonEndpoints(t *testing.T) {
udpAddr := "127.0.0.1:2000"
tcpAddr := "127.0.0.1:2000"
udpEndpt, _ := resolveUDPAddr(udpAddr)
tcpEndpt, _ := resolveTCPAddr(tcpAddr)
dEndpt := GetDefaultDaemonEndpoints()
assert.Equal(t, dEndpt.UDPAddr, udpEndpt)
assert.Equal(t, dEndpt.TCPAddr, tcpEndpt)
}
func TestGetDaemonEndpointsFromString1(t *testing.T) {
udpAddr := "127.0.0.1:2000"
tcpAddr := "127.0.0.1:2000"
udpEndpt, _ := resolveUDPAddr(udpAddr)
tcpEndpt, _ := resolveTCPAddr(tcpAddr)
dAddr := udpAddr
dEndpt, err := GetDaemonEndpointsFromString(dAddr)
assert.Nil(t, err)
assert.Equal(t, dEndpt.UDPAddr, udpEndpt)
assert.Equal(t, dEndpt.TCPAddr, tcpEndpt)
}
func TestGetDaemonEndpointsFromString2(t *testing.T) {
udpAddr := "127.0.0.1:2000"
tcpAddr := "127.0.0.1:2000"
dAddr := "127.0.0.1:2001"
os.Setenv("AWS_XRAY_DAEMON_ADDRESS", udpAddr) // env variable gets precedence over provided daemon addr
defer os.Unsetenv("AWS_XRAY_DAEMON_ADDRESS")
udpEndpt, _ := resolveUDPAddr(udpAddr)
tcpEndpt, _ := resolveTCPAddr(tcpAddr)
dEndpt, err := GetDaemonEndpointsFromString(dAddr)
assert.Nil(t, err)
assert.Equal(t, dEndpt.UDPAddr, udpEndpt)
assert.Equal(t, dEndpt.TCPAddr, tcpEndpt)
}
func TestGetDaemonEndpointsFromString3(t *testing.T) {
udpAddr := "127.0.0.2:2001"
tcpAddr := "127.0.0.1:2000"
udpEndpt, _ := resolveUDPAddr(udpAddr)
tcpEndpt, _ := resolveTCPAddr(tcpAddr)
dAddr := "tcp:" + tcpAddr + " udp:" + udpAddr
dEndpt, err := GetDaemonEndpointsFromString(dAddr)
assert.Nil(t, err)
assert.Equal(t, dEndpt.UDPAddr, udpEndpt)
assert.Equal(t, dEndpt.TCPAddr, tcpEndpt)
}
func TestGetDaemonEndpointsFromString4(t *testing.T) {
udpAddr := "127.0.0.2:2001"
tcpAddr := "127.0.0.1:2000"
udpEndpt, _ := resolveUDPAddr(udpAddr)
tcpEndpt, _ := resolveTCPAddr(tcpAddr)
dAddr := "udp:" + udpAddr + " tcp:" + tcpAddr
dEndpt, err := GetDaemonEndpointsFromString(dAddr)
assert.Nil(t, err)
assert.Equal(t, dEndpt.UDPAddr, udpEndpt)
assert.Equal(t, dEndpt.TCPAddr, tcpEndpt)
}
func TestGetDaemonEndpointsFromString5(t *testing.T) {
udpAddr := "127.0.0.2:2001"
tcpAddr := "127.0.0.1:2000"
udpEndpt, _ := resolveUDPAddr(udpAddr)
tcpEndpt, _ := resolveTCPAddr(tcpAddr)
dAddr := "udp:" + udpAddr + " tcp:" + tcpAddr
os.Setenv("AWS_XRAY_DAEMON_ADDRESS", dAddr) // env variable gets precedence over provided daemon addr
defer os.Unsetenv("AWS_XRAY_DAEMON_ADDRESS")
dEndpt, err := GetDaemonEndpointsFromString("tcp:127.0.0.5:2001 udp:127.0.0.5:2001")
assert.Nil(t, err)
assert.Equal(t, dEndpt.UDPAddr, udpEndpt)
assert.Equal(t, dEndpt.TCPAddr, tcpEndpt)
}
func TestGetDaemonEndpointsFromStringInvalid1(t *testing.T) { // "udp:127.0.0.5:2001 udp:127.0.0.5:2001"
udpAddr := "127.0.0.2:2001"
tcpAddr := "127.0.0.1:2000"
dAddr := "udp:" + udpAddr + " udp:" + tcpAddr
dEndpt, err := GetDaemonEndpointsFromString(dAddr)
assert.NotNil(t, err)
assert.True(t, strings.Contains(fmt.Sprint(err), addrErr))
assert.Nil(t, dEndpt)
}
func TestGetDaemonEndpointsFromStringInvalid2(t *testing.T) { // "tcp:127.0.0.5:2001 tcp:127.0.0.5:2001"
udpAddr := "127.0.0.2:2001"
tcpAddr := "127.0.0.1:2000"
dAddr := "tcp:" + udpAddr + " tcp:" + tcpAddr
dEndpt, err := GetDaemonEndpointsFromString(dAddr)
assert.NotNil(t, err)
assert.True(t, strings.Contains(fmt.Sprint(err), addrErr))
assert.Nil(t, dEndpt)
}
func TestGetDaemonEndpointsFromStringInvalid3(t *testing.T) { // env variable set is invalid, string passed is valid
udpAddr := "127.0.0.2:2001"
tcpAddr := "127.0.0.1:2000"
os.Setenv("AWS_XRAY_DAEMON_ADDRESS", "tcp:127.0.0.5:2001 tcp:127.0.0.5:2001") // invalid
defer os.Unsetenv("AWS_XRAY_DAEMON_ADDRESS")
dAddr := "udp:" + udpAddr + " tcp:" + tcpAddr
dEndpt, err := GetDaemonEndpointsFromString(dAddr)
assert.NotNil(t, err)
assert.True(t, strings.Contains(fmt.Sprint(err), addrErr))
assert.Nil(t, dEndpt)
}
func TestGetDaemonEndpointsFromStringInvalid4(t *testing.T) {
udpAddr := "1.2.1:2a" // error in resolving address port
tcpAddr := "127.0.0.1:2000"
dAddr := "udp:" + udpAddr + " tcp:" + tcpAddr
dEndpt, err := GetDaemonEndpointsFromString(dAddr)
assert.True(t, strings.Contains(fmt.Sprint(err), portErr))
assert.NotNil(t, err)
assert.Nil(t, dEndpt)
}
func TestGetDaemonEndpointsFromStringInvalid5(t *testing.T) {
udpAddr := "127.0.0.2:2001"
tcpAddr := "127.0.a.1:2000" // error in resolving address
dAddr := "udp:" + udpAddr + " tcp:" + tcpAddr
dEndpt, err := GetDaemonEndpointsFromString(dAddr)
assert.NotNil(t, err)
assert.Nil(t, dEndpt)
}
func TestGetDaemonEndpointsFromStringInvalid6(t *testing.T) {
udpAddr := "127.0.0.2:2001"
dAddr := "udp:" + udpAddr // no tcp address present
dEndpt, err := GetDaemonEndpointsFromString(dAddr)
assert.NotNil(t, err)
assert.True(t, strings.Contains(fmt.Sprint(err), addrErr))
assert.Nil(t, dEndpt)
}
func TestGetDaemonEndpointsFromStringInvalid7(t *testing.T) {
dAddr := ""
dEndpt, err := GetDaemonEndpointsFromString(dAddr) // address passed is nil and env variable not set
assert.Nil(t, err)
assert.Nil(t, dEndpt)
}
func TestGetDaemonEndpointsForHostname1(t *testing.T) { // parsing hostname - single form
udpAddr := "127.0.0.1:2000"
tcpAddr := "127.0.0.1:2000"
udpEndpt, _ := resolveUDPAddr(udpAddr)
tcpEndpt, _ := resolveTCPAddr(tcpAddr)
dEndpt, _ := GetDaemonEndpointsFromString("localhost:2000")
// Keep dEndpt.UDPAddr.IP as the IPv6 representation of an IPv4 address
	if len(dEndpt.UDPAddr.IP) == 4 {
		dEndpt.UDPAddr.IP = dEndpt.UDPAddr.IP.To16()
	}
	if len(dEndpt.TCPAddr.IP) == 4 {
		dEndpt.TCPAddr.IP = dEndpt.TCPAddr.IP.To16()
	}
assert.Equal(t, dEndpt.UDPAddr, udpEndpt)
assert.Equal(t, dEndpt.TCPAddr, tcpEndpt)
}
func TestGetDaemonEndpointsForHostname2(t *testing.T) { // Invalid hostname - single form
dEndpt, err := GetDaemonEndpointsFromString("XYZ:2000")
assert.NotNil(t, err)
assert.Nil(t, dEndpt)
}
func TestGetDaemonEndpointsForHostname3(t *testing.T) { // parsing hostname - double form
udpAddr := "127.0.0.1:2000"
tcpAddr := "127.0.0.1:2000"
udpEndpt, _ := resolveUDPAddr(udpAddr)
tcpEndpt, _ := resolveTCPAddr(tcpAddr)
dEndpt, _ := GetDaemonEndpointsFromString("tcp:localhost:2000 udp:localhost:2000")
// Keep dEndpt.UDPAddr.IP as the IPv6 representation of an IPv4 address
	if len(dEndpt.UDPAddr.IP) == 4 {
		dEndpt.UDPAddr.IP = dEndpt.UDPAddr.IP.To16()
	}
	if len(dEndpt.TCPAddr.IP) == 4 {
		dEndpt.TCPAddr.IP = dEndpt.TCPAddr.IP.To16()
	}
assert.Equal(t, dEndpt.UDPAddr, udpEndpt)
assert.Equal(t, dEndpt.TCPAddr, tcpEndpt)
}
func TestGetDaemonEndpointsForHostname4(t *testing.T) { // Invalid hostname - double form
dEndpt, err := GetDaemonEndpointsFromString("tcp:ABC:2000 udp:XYZ:2000")
assert.NotNil(t, err)
assert.Nil(t, dEndpt)
}
func TestGetDaemonEndpointsForHostname5(t *testing.T) { // Invalid hostname - double form
dEndpt, err := GetDaemonEndpointsFromString("tcp:localhost:2000 tcp:localhost:2000")
assert.NotNil(t, err)
assert.True(t, strings.Contains(fmt.Sprint(err), addrErr))
assert.Nil(t, dEndpt)
}
func TestGetDaemonEndpointsForHostname6(t *testing.T) { // Invalid port - single form
dEndpt, err := GetDaemonEndpointsFromString("localhost:")
assert.NotNil(t, err)
assert.True(t, strings.Contains(fmt.Sprint(err), portErr))
assert.Nil(t, dEndpt)
}
func TestGetDaemonEndpointsForHostname7(t *testing.T) { // Invalid port - double form
dEndpt, err := GetDaemonEndpointsFromString("tcp:localhost:r4 tcp:localhost:2000")
assert.NotNil(t, err)
assert.True(t, strings.Contains(fmt.Sprint(err), portErr))
assert.Nil(t, dEndpt)
}
// Benchmarks
func BenchmarkGetDaemonEndpoints(b *testing.B) {
for i := 0; i < b.N; i++ {
GetDaemonEndpoints()
}
}
func BenchmarkGetDaemonEndpointsFromEnv_DoubleParse(b *testing.B) {
os.Setenv("AWS_XRAY_DAEMON_ADDRESS", "tcp:127.0.0.1:2000 udp:127.0.0.1:2000")
for i := 0; i < b.N; i++ {
_, err := GetDaemonEndpointsFromEnv()
if err != nil {
return
}
}
os.Unsetenv("AWS_XRAY_DAEMON_ADDRESS")
}
func BenchmarkGetDaemonEndpointsFromEnv_SingleParse(b *testing.B) {
os.Setenv("AWS_XRAY_DAEMON_ADDRESS", "udp:127.0.0.1:2000")
for i := 0; i < b.N; i++ {
_, err := GetDaemonEndpointsFromEnv()
if err != nil {
return
}
}
os.Unsetenv("AWS_XRAY_DAEMON_ADDRESS")
}
func BenchmarkGetDaemonEndpointsFromString(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := GetDaemonEndpointsFromString("udp:127.0.0.1:2000")
if err != nil {
return
}
}
}
| 348 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package header
import (
"bytes"
"strings"
)
const (
// RootPrefix is the prefix for
// Root attribute in X-Amzn-Trace-Id.
RootPrefix = "Root="
// ParentPrefix is the prefix for
// Parent attribute in X-Amzn-Trace-Id.
ParentPrefix = "Parent="
// SampledPrefix is the prefix for
// Sampled attribute in X-Amzn-Trace-Id.
SampledPrefix = "Sampled="
// SelfPrefix is the prefix for
// Self attribute in X-Amzn-Trace-Id.
SelfPrefix = "Self="
)
// SamplingDecision is a string representation of
// whether or not the current segment has been sampled.
type SamplingDecision string
const (
// Sampled indicates the current segment has been
// sampled and will be sent to the X-Ray daemon.
Sampled SamplingDecision = "Sampled=1"
// NotSampled indicates the current segment has
// not been sampled.
NotSampled SamplingDecision = "Sampled=0"
// Requested indicates sampling decision will be
// made by the downstream service and propagated
// back upstream in the response.
Requested SamplingDecision = "Sampled=?"
// Unknown indicates no sampling decision will be made.
Unknown SamplingDecision = ""
)
func samplingDecision(s string) SamplingDecision {
switch s {
case string(Sampled):
return Sampled
case string(NotSampled):
return NotSampled
case string(Requested):
return Requested
}
return Unknown
}
// Header is the value of X-Amzn-Trace-Id.
type Header struct {
TraceID string
ParentID string
SamplingDecision SamplingDecision
AdditionalData map[string]string
}
// FromString parses an X-Amzn-Trace-Id header value into its individual Header fields.
func FromString(s string) *Header {
ret := &Header{
SamplingDecision: Unknown,
AdditionalData: make(map[string]string),
}
parts := strings.Split(s, ";")
for i := range parts {
p := strings.TrimSpace(parts[i])
value, valid := valueFromKeyValuePair(p)
if valid {
switch {
case strings.HasPrefix(p, RootPrefix):
ret.TraceID = value
case strings.HasPrefix(p, ParentPrefix):
ret.ParentID = value
case strings.HasPrefix(p, SampledPrefix):
ret.SamplingDecision = samplingDecision(p)
case !strings.HasPrefix(p, SelfPrefix):
key, valid := keyFromKeyValuePair(p)
if valid {
ret.AdditionalData[key] = value
}
}
}
}
return ret
}
// String returns the string representation of the header.
func (h Header) String() string {
var p [][]byte
if h.TraceID != "" {
p = append(p, []byte(RootPrefix+h.TraceID))
}
if h.ParentID != "" {
p = append(p, []byte(ParentPrefix+h.ParentID))
}
p = append(p, []byte(h.SamplingDecision))
for key := range h.AdditionalData {
p = append(p, []byte(key+"="+h.AdditionalData[key]))
}
return string(bytes.Join(p, []byte(";")))
}
func keyFromKeyValuePair(s string) (string, bool) {
	e := strings.Index(s, "=")
	if e != -1 {
		return s[:e], true
	}
	return "", false
}
func valueFromKeyValuePair(s string) (string, bool) {
	e := strings.Index(s, "=")
	if e != -1 {
		return s[e+1:], true
	}
	return "", false
}
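// exampleHeaderRoundTrip is an illustrative sketch added for documentation only; it is not used
// by the SDK. It shows FromString and String working as near-inverses; the trace and parent IDs
// below are placeholder values.
func exampleHeaderRoundTrip() string {
	h := FromString("Root=1-5759e988-bd862e3fe1be46a994272793;Parent=53995c3f42cd8ad8;Sampled=1")
	// h.TraceID == "1-5759e988-bd862e3fe1be46a994272793"
	// h.ParentID == "53995c3f42cd8ad8"
	// h.SamplingDecision == Sampled
	return h.String() // re-serialized for propagation to a downstream service
}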
| 137 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package header
import (
"testing"
"github.com/stretchr/testify/assert"
)
const ExampleTraceID string = "0-57ff426a-80c11c39b0c928905eb0828d"
func TestSampledEqualsOneFromString(t *testing.T) {
h := FromString("Sampled=1")
assert.Equal(t, Sampled, h.SamplingDecision)
assert.Empty(t, h.TraceID)
assert.Empty(t, h.ParentID)
assert.Empty(t, h.AdditionalData)
}
func TestLonghFromString(t *testing.T) {
h := FromString("Sampled=?;Root=" + ExampleTraceID + ";Parent=foo;Self=2;Foo=bar")
assert.Equal(t, Requested, h.SamplingDecision)
assert.Equal(t, ExampleTraceID, h.TraceID)
assert.Equal(t, "foo", h.ParentID)
assert.Equal(t, 1, len(h.AdditionalData))
assert.Equal(t, "bar", h.AdditionalData["Foo"])
}
func TestLonghFromStringWithSpaces(t *testing.T) {
h := FromString("Sampled=?; Root=" + ExampleTraceID + "; Parent=foo; Self=2; Foo=bar")
assert.Equal(t, Requested, h.SamplingDecision)
assert.Equal(t, ExampleTraceID, h.TraceID)
assert.Equal(t, "foo", h.ParentID)
assert.Equal(t, 1, len(h.AdditionalData))
assert.Equal(t, "bar", h.AdditionalData["Foo"])
}
func TestSampledUnknownToString(t *testing.T) {
h := &Header{}
h.SamplingDecision = Unknown
assert.Equal(t, "", h.String())
}
func TestSampledEqualsOneToString(t *testing.T) {
h := &Header{}
h.SamplingDecision = Sampled
assert.Equal(t, "Sampled=1", h.String())
}
func TestSampledEqualsOneAndParentToString(t *testing.T) {
h := &Header{}
h.SamplingDecision = Sampled
h.ParentID = "foo"
assert.Equal(t, "Parent=foo;Sampled=1", h.String())
}
func TestLonghToString(t *testing.T) {
h := &Header{}
h.SamplingDecision = Sampled
h.TraceID = ExampleTraceID
h.ParentID = "foo"
h.AdditionalData = make(map[string]string)
h.AdditionalData["Foo"] = "bar"
assert.Equal(t, "Root="+ExampleTraceID+";Parent=foo;Sampled=1;Foo=bar", h.String())
}
// Benchmark
func BenchmarkFromString(b *testing.B) {
str := "Sampled=?; Root=" + ExampleTraceID + "; Parent=foo; Self=2; Foo=bar"
for i := 0; i < b.N; i++ {
FromString(str)
}
}
| 85 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package awsv2
import (
"context"
v2Middleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-xray-sdk-go/xray"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
type awsV2SubsegmentKey struct{}
func initializeMiddlewareAfter(stack *middleware.Stack) error {
return stack.Initialize.Add(middleware.InitializeMiddlewareFunc("XRayInitializeMiddlewareAfter", func(
ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error) {
serviceName := v2Middleware.GetServiceID(ctx)
// Start the subsegment
ctx, subseg := xray.BeginSubsegment(ctx, serviceName)
if subseg == nil {
return next.HandleInitialize(ctx, in)
}
subseg.Namespace = "aws"
subseg.GetAWS()["region"] = v2Middleware.GetRegion(ctx)
subseg.GetAWS()["operation"] = v2Middleware.GetOperationName(ctx)
// set the subsegment in the context
ctx = context.WithValue(ctx, awsV2SubsegmentKey{}, subseg)
out, metadata, err = next.HandleInitialize(ctx, in)
// End the subsegment when the response returns from this middleware
defer subseg.Close(err)
return out, metadata, err
}),
middleware.After)
}
func deserializeMiddleware(stack *middleware.Stack) error {
return stack.Deserialize.Add(middleware.DeserializeMiddlewareFunc("XRayDeserializeMiddleware", func(
ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error) {
subseg, ok := ctx.Value(awsV2SubsegmentKey{}).(*xray.Segment)
if !ok {
return next.HandleDeserialize(ctx, in)
}
in.Request.(*smithyhttp.Request).Header.Set(xray.TraceIDHeaderKey, subseg.DownstreamHeader().String())
out, metadata, err = next.HandleDeserialize(ctx, in)
resp, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
// No raw response to wrap with.
return out, metadata, err
}
subseg.GetHTTP().GetResponse().ContentLength = int(resp.ContentLength)
requestID, ok := v2Middleware.GetRequestIDMetadata(metadata)
if ok {
subseg.GetAWS()[xray.RequestIDKey] = requestID
}
if extendedRequestID := resp.Header.Get(xray.S3ExtendedRequestIDHeaderKey); extendedRequestID != "" {
subseg.GetAWS()[xray.ExtendedRequestIDKey] = extendedRequestID
}
xray.HttpCaptureResponse(subseg, resp.StatusCode)
return out, metadata, err
}),
middleware.Before)
}
func AWSV2Instrumentor(apiOptions *[]func(*middleware.Stack) error) {
*apiOptions = append(*apiOptions, initializeMiddlewareAfter, deserializeMiddleware)
}
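// instrumentClientSketch is an illustrative sketch added for documentation only; it is not part
// of the public API. It shows the intended wiring: hand AWSV2Instrumentor the APIOptions slice of
// an aws-sdk-go-v2 configuration or client before issuing requests. A typical call site (the
// config and s3 package names are assumed from aws-sdk-go-v2) looks like:
//
//	cfg, err := config.LoadDefaultConfig(ctx)
//	if err != nil {
//		// handle error
//	}
//	awsv2.AWSV2Instrumentor(&cfg.APIOptions)
//	client := s3.NewFromConfig(cfg)
//
// Every call made through client then records an "aws" namespace subsegment when a segment is
// present in the request context.
func instrumentClientSketch(apiOptions *[]func(*middleware.Stack) error) {
	AWSV2Instrumentor(apiOptions)
}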
| 89 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package awsv2
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/route53"
"github.com/aws/aws-sdk-go-v2/service/route53/types"
"github.com/aws/aws-xray-sdk-go/strategy/ctxmissing"
"github.com/aws/aws-xray-sdk-go/xray"
)
func TestAWSV2(t *testing.T) {
cases := map[string]struct {
responseStatus int
responseBody []byte
expectedRegion string
expectedError string
expectedRequestID string
expectedStatusCode int
}{
"fault response": {
responseStatus: 500,
responseBody: []byte(`<?xml version="1.0" encoding="UTF-8"?>
<InvalidChangeBatch xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<Messages>
<Message>Tried to create resource record set duplicate.example.com. type A, but it already exists</Message>
</Messages>
<RequestId>b25f48e8-84fd-11e6-80d9-574e0c4664cb</RequestId>
</InvalidChangeBatch>`),
expectedRegion: "us-east-1",
expectedError: "Error",
expectedRequestID: "b25f48e8-84fd-11e6-80d9-574e0c4664cb",
expectedStatusCode: 500,
},
"error response": {
responseStatus: 404,
responseBody: []byte(`<?xml version="1.0"?>
<ErrorResponse xmlns="http://route53.amazonaws.com/doc/2016-09-07/">
<Error>
<Type>Sender</Type>
<Code>MalformedXML</Code>
<Message>1 validation error detected: Value null at 'route53#ChangeSet' failed to satisfy constraint: Member must not be null</Message>
</Error>
<RequestId>1234567890A</RequestId>
</ErrorResponse>
`),
expectedRegion: "us-west-1",
expectedError: "Error",
expectedRequestID: "1234567890A",
expectedStatusCode: 404,
},
"success response": {
responseStatus: 200,
responseBody: []byte(`<?xml version="1.0" encoding="UTF-8"?>
<ChangeResourceRecordSetsResponse>
<ChangeInfo>
<Comment>mockComment</Comment>
<Id>mockID</Id>
</ChangeInfo>
</ChangeResourceRecordSetsResponse>`),
expectedRegion: "us-west-2",
expectedStatusCode: 200,
},
}
for name, c := range cases {
server := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(c.responseStatus)
_, err := w.Write(c.responseBody)
if err != nil {
t.Fatal(err)
}
}))
defer server.Close()
t.Run(name, func(t *testing.T) {
ctx, root := xray.BeginSegment(context.Background(), "AWSSDKV2_Route53")
svc := route53.NewFromConfig(aws.Config{
Region: c.expectedRegion,
EndpointResolver: aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
return aws.Endpoint{
URL: server.URL,
SigningName: "route53",
}, nil
}),
Retryer: func() aws.Retryer {
return aws.NopRetryer{}
},
})
_, _ = svc.ChangeResourceRecordSets(ctx, &route53.ChangeResourceRecordSetsInput{
ChangeBatch: &types.ChangeBatch{
Changes: []types.Change{},
Comment: aws.String("mock"),
},
HostedZoneId: aws.String("zone"),
}, func(options *route53.Options) {
AWSV2Instrumentor(&options.APIOptions)
})
root.Close(nil)
seg := xray.GetSegment(ctx)
var subseg *xray.Segment
_ = json.Unmarshal(seg.Subsegments[0], &subseg)
if e, a := "Route 53", subseg.Name; !strings.EqualFold(e, a) {
t.Errorf("expected segment name to be %s, got %s", e, a)
}
if e, a := c.expectedRegion, fmt.Sprintf("%v", subseg.GetAWS()["region"]); !strings.EqualFold(e, a) {
t.Errorf("expected subsegment name to be %s, got %s", e, a)
}
if e, a := "ChangeResourceRecordSets", fmt.Sprintf("%v", subseg.GetAWS()["operation"]); !strings.EqualFold(e, a) {
t.Errorf("expected operation to be %s, got %s", e, a)
}
if e, a := fmt.Sprint(c.expectedStatusCode), fmt.Sprintf("%v", subseg.GetHTTP().GetResponse().Status); !strings.EqualFold(e, a) {
t.Errorf("expected status code to be %s, got %s", e, a)
}
if e, a := "aws", subseg.Namespace; !strings.EqualFold(e, a) {
t.Errorf("expected namespace to be %s, got %s", e, a)
}
if subseg.GetAWS()[xray.RequestIDKey] != nil {
if e, a := c.expectedRequestID, fmt.Sprintf("%v", subseg.GetAWS()[xray.RequestIDKey]); !strings.EqualFold(e, a) {
t.Errorf("expected request id to be %s, got %s", e, a)
}
}
})
time.Sleep(1 * time.Second)
}
}
func TestAWSV2WithoutSegment(t *testing.T) {
cases := map[string]struct {
responseStatus int
responseBody []byte
}{
"fault response": {
responseStatus: 500,
responseBody: []byte(`<?xml version="1.0" encoding="UTF-8"?>
<InvalidChangeBatch xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<Messages>
<Message>Tried to create resource record set duplicate.example.com. type A, but it already exists</Message>
</Messages>
<RequestId>b25f48e8-84fd-11e6-80d9-574e0c4664cb</RequestId>
</InvalidChangeBatch>`),
},
"error response": {
responseStatus: 404,
responseBody: []byte(`<?xml version="1.0"?>
<ErrorResponse xmlns="http://route53.amazonaws.com/doc/2016-09-07/">
<Error>
<Type>Sender</Type>
<Code>MalformedXML</Code>
<Message>1 validation error detected: Value null at 'route53#ChangeSet' failed to satisfy constraint: Member must not be null</Message>
</Error>
<RequestId>1234567890A</RequestId>
</ErrorResponse>
`),
},
"success response": {
responseStatus: 200,
responseBody: []byte(`<?xml version="1.0" encoding="UTF-8"?>
<ChangeResourceRecordSetsResponse>
<ChangeInfo>
<Comment>mockComment</Comment>
<Id>mockID</Id>
</ChangeInfo>
</ChangeResourceRecordSetsResponse>`),
},
}
for name, c := range cases {
server := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(c.responseStatus)
_, err := w.Write(c.responseBody)
if err != nil {
t.Fatal(err)
}
}))
defer server.Close()
t.Run(name, func(t *testing.T) {
// Ignore errors when segment cannot be found.
ctx, err := xray.ContextWithConfig(
context.Background(),
xray.Config{ContextMissingStrategy: ctxmissing.NewDefaultIgnoreErrorStrategy()},
)
if err != nil {
t.Fatal(err)
}
svc := route53.NewFromConfig(aws.Config{
EndpointResolver: aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
return aws.Endpoint{
URL: server.URL,
SigningName: "route53",
}, nil
}),
Retryer: func() aws.Retryer {
return aws.NopRetryer{}
},
})
_, _ = svc.ChangeResourceRecordSets(ctx, &route53.ChangeResourceRecordSetsInput{
ChangeBatch: &types.ChangeBatch{
Changes: []types.Change{},
Comment: aws.String("mock"),
},
HostedZoneId: aws.String("zone"),
}, func(options *route53.Options) {
AWSV2Instrumentor(&options.APIOptions)
})
})
time.Sleep(1 * time.Second)
}
}
| 244 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package distributioncheck
import (
"context"
"testing"
"github.com/aws/aws-xray-sdk-go/xray"
"github.com/stretchr/testify/assert"
)
func TestCreateSegment(t *testing.T) {
_, seg := xray.BeginSegment(context.Background(), "test")
assert.Equal(t, "test", seg.Name)
}
| 23 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package logger
import (
"fmt"
"os"
"github.com/aws/aws-xray-sdk-go/xraylog"
)
// This internal package hides the actual logging functions from the user.
// Logger instance used by xray to log. Set via xray.SetLogger().
var Logger xraylog.Logger = xraylog.NewDefaultLogger(os.Stdout, xraylog.LogLevelInfo)
func Debugf(format string, args ...interface{}) {
Logger.Log(xraylog.LogLevelDebug, printfArgs{format, args})
}
func Debug(args ...interface{}) {
Logger.Log(xraylog.LogLevelDebug, printArgs(args))
}
func DebugDeferred(fn func() string) {
Logger.Log(xraylog.LogLevelDebug, stringerFunc(fn))
}
func Infof(format string, args ...interface{}) {
Logger.Log(xraylog.LogLevelInfo, printfArgs{format, args})
}
func Info(args ...interface{}) {
Logger.Log(xraylog.LogLevelInfo, printArgs(args))
}
func Warnf(format string, args ...interface{}) {
Logger.Log(xraylog.LogLevelWarn, printfArgs{format, args})
}
func Warn(args ...interface{}) {
Logger.Log(xraylog.LogLevelWarn, printArgs(args))
}
func Errorf(format string, args ...interface{}) {
Logger.Log(xraylog.LogLevelError, printfArgs{format, args})
}
func Error(args ...interface{}) {
Logger.Log(xraylog.LogLevelError, printArgs(args))
}
type printfArgs struct {
format string
args []interface{}
}
func (p printfArgs) String() string {
return fmt.Sprintf(p.format, p.args...)
}
type printArgs []interface{}
func (p printArgs) String() string {
return fmt.Sprint([]interface{}(p)...)
}
type stringerFunc func() string
func (sf stringerFunc) String() string {
return sf()
}
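// exampleSwapLogger is an illustrative sketch, not part of the original file: it
// redirects SDK logging to stderr at warn level by replacing the package-level
// Logger, which is what xray.SetLogger does on a caller's behalf. The writer and
// level chosen here are placeholders.
func exampleSwapLogger() {
	Logger = xraylog.NewDefaultLogger(os.Stderr, xraylog.LogLevelWarn)
	// Formatting is deferred until Logger.Log decides the record is enabled.
	Warnf("emitting records at %s level and above", "warn")
}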
| 79 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package logger
import (
"bytes"
"strings"
"testing"
"github.com/aws/aws-xray-sdk-go/xraylog"
)
func TestLogger(t *testing.T) {
oldLogger := Logger
defer func() { Logger = oldLogger }()
var buf bytes.Buffer
// filter properly by level
Logger = xraylog.NewDefaultLogger(&buf, xraylog.LogLevelWarn)
Debug("debug")
Info("info")
Warn("warn")
Error("error")
gotLines := strings.Split(strings.TrimSpace(buf.String()), "\n")
if len(gotLines) != 2 {
t.Fatalf("got %d lines", len(gotLines))
}
if !strings.Contains(gotLines[0], "[WARN] warn") {
t.Error("expected first line to be warn")
}
if !strings.Contains(gotLines[1], "[ERROR] error") {
		t.Error("expected second line to be error")
}
}
func TestDeferredDebug(t *testing.T) {
oldLogger := Logger
defer func() { Logger = oldLogger }()
var buf bytes.Buffer
Logger = xraylog.NewDefaultLogger(&buf, xraylog.LogLevelInfo)
var called bool
DebugDeferred(func() string {
called = true
return "deferred"
})
if called {
t.Error("deferred should not have been called")
}
if buf.String() != "" {
t.Errorf("unexpected log contents: %s", buf.String())
}
Logger = xraylog.NewDefaultLogger(&buf, xraylog.LogLevelDebug)
DebugDeferred(func() string {
called = true
return "deferred"
})
if !called {
t.Error("deferred should have been called")
}
if !strings.Contains(buf.String(), "[DEBUG] deferred") {
t.Errorf("expected deferred message, got %s", buf.String())
}
}
| 84 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package plugins
const (
// EBServiceName is the key name for metadata of ElasticBeanstalkPlugin.
EBServiceName = "elastic_beanstalk"
// EC2ServiceName is the key name for metadata of EC2Plugin.
EC2ServiceName = "ec2"
// ECSServiceName is the key name for metadata of ECSPlugin.
ECSServiceName = "ecs"
)
// InstancePluginMetadata points to the PluginMetadata struct.
var InstancePluginMetadata = &PluginMetadata{}
// PluginMetadata struct contains items to record information
// about the AWS infrastructure hosting the traced application.
type PluginMetadata struct {
// EC2Metadata records the ec2 instance ID and availability zone.
EC2Metadata *EC2Metadata
// BeanstalkMetadata records the Elastic Beanstalk
// environment name, version label, and deployment ID.
BeanstalkMetadata *BeanstalkMetadata
// ECSMetadata records the ECS container ID.
ECSMetadata *ECSMetadata
	// Origin records the original service of the segment.
Origin string
}
// EC2Metadata provides the shape for unmarshalling EC2 metadata.
type EC2Metadata struct {
InstanceID string `json:"instance_id"`
AvailabilityZone string `json:"availability_zone"`
}
// ECSMetadata provides the shape for unmarshalling
// ECS metadata.
type ECSMetadata struct {
ContainerName string `json:"container"`
}
// BeanstalkMetadata provides the shape for unmarshalling
// Elastic Beanstalk environment metadata.
type BeanstalkMetadata struct {
Environment string `json:"environment_name"`
VersionLabel string `json:"version_label"`
DeploymentID int `json:"deployment_id"`
}
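// recordEC2Metadata is an illustrative sketch, not part of the original file: a
// plugin would populate InstancePluginMetadata roughly like this after querying
// instance metadata. The instance ID, availability zone, and origin values below
// are placeholders, not values produced by the SDK.
func recordEC2Metadata() {
	InstancePluginMetadata.EC2Metadata = &EC2Metadata{
		InstanceID:       "i-0123456789abcdef0",
		AvailabilityZone: "us-west-2a",
	}
	InstancePluginMetadata.Origin = "AWS::EC2::Instance"
}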
| 62 |
aws-xray-sdk-go | aws | Go | package lambda
import (
"strings"
"github.com/aws/aws-lambda-go/events"
)
// IsSampled reports whether the SQS message's AWSTraceHeader attribute carries an
// upstream sampling decision of Sampled=1.
func IsSampled(sqsMessage events.SQSMessage) bool {
	value, ok := sqsMessage.Attributes["AWSTraceHeader"]
if !ok {
return false
}
return strings.Contains(value, "Sampled=1")
}
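// handleSQSEvent is an illustrative sketch, not part of the original file: it shows
// how IsSampled could gate per-message tracing work inside an SQS-triggered Lambda
// handler. The loop body is a placeholder.
func handleSQSEvent(event events.SQSEvent) {
	for _, record := range event.Records {
		if IsSampled(record) {
			// The upstream producer sampled this trace; continue tracing it here.
			_ = record.MessageId
		}
	}
}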
| 18 |
aws-xray-sdk-go | aws | Go | package lambda
import (
"testing"
"github.com/aws/aws-lambda-go/events"
"github.com/stretchr/testify/assert"
)
func TestSQSMessageHelper(t *testing.T) {
testTrue(t, "Root=1-632BB806-bd862e3fe1be46a994272793;Sampled=1")
testTrue(t, "Root=1-5759e988-bd862e3fe1be46a994272793;Sampled=1")
testTrue(t, "Root=1-5759e988-bd862e3fe1be46a994272793;Parent=53995c3f42cd8ad8;Sampled=1")
testFalse(t, "Root=1-632BB806-bd862e3fe1be46a994272793")
testFalse(t, "Root=1-632BB806-bd862e3fe1be46a994272793;Sampled=0")
testFalse(t, "Root=1-5759e988-bd862e3fe1be46a994272793;Sampled=0")
testFalse(t, "Root=1-5759e988-bd862e3fe1be46a994272793;Parent=53995c3f42cd8ad8;Sampled=0")
}
func testTrue(t *testing.T, header string) {
var sqsMessage events.SQSMessage
sqsMessage.Attributes = make(map[string]string)
sqsMessage.Attributes["AWSTraceHeader"] = header
assert.True(t, IsSampled(sqsMessage))
}
func testFalse(t *testing.T, header string) {
var sqsMessage events.SQSMessage
sqsMessage.Attributes = make(map[string]string)
sqsMessage.Attributes["AWSTraceHeader"] = header
assert.False(t, IsSampled(sqsMessage))
}
| 34 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
// Package pattern provides a basic pattern matching utility.
// Patterns may contain fixed text, and/or special characters (`*`, `?`).
// `*` represents 0 or more wildcard characters. `?` represents a single wildcard character.
package pattern
import "strings"
// WildcardMatchCaseInsensitive returns true if text matches pattern (case-insensitive); returns false otherwise.
func WildcardMatchCaseInsensitive(pattern, text string) bool {
return WildcardMatch(pattern, text, true)
}
// WildcardMatch returns true if text matches pattern at the given case-sensitivity; returns false otherwise.
func WildcardMatch(pattern, text string, caseInsensitive bool) bool {
patternLen := len(pattern)
textLen := len(text)
if patternLen == 0 {
return textLen == 0
}
if pattern == "*" {
return true
}
if caseInsensitive {
pattern = strings.ToLower(pattern)
text = strings.ToLower(text)
}
i := 0
p := 0
iStar := textLen
pStar := 0
for i < textLen {
if p < patternLen {
switch pattern[p] {
case text[i]:
i++
p++
continue
case '?':
i++
p++
continue
case '*':
iStar = i
pStar = p
p++
continue
}
}
if iStar == textLen {
return false
}
iStar++
i = iStar
p = pStar + 1
}
for p < patternLen && pattern[p] == '*' {
p++
}
return p == patternLen && i == textLen
}
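// examplePatternMatching is an illustrative sketch, not part of the original file. It
// demonstrates the supported syntax: '?' consumes exactly one character, '*' consumes
// zero or more, and case sensitivity is controlled by the caller. The inputs are
// arbitrary examples.
func examplePatternMatching() (bool, bool, bool) {
	exact := WildcardMatchCaseInsensitive("fo?", "Foo")                       // true: '?' matches the final character
	glob := WildcardMatchCaseInsensitive("*.example.com", "api.example.com")  // true: '*' matches "api"
	caseSensitive := WildcardMatch("Fo*", "FOO", false)                       // false: 'o' does not match 'O'
	return exact, glob, caseSensitive
}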
| 75 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package pattern
import (
"bytes"
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
)
func TestInvalidArgs(t *testing.T) {
assert.False(t, WildcardMatchCaseInsensitive("", "whatever"))
}
func TestMatchExactPositive(t *testing.T) {
assert.True(t, WildcardMatchCaseInsensitive("foo", "foo"))
}
func TestMatchExactNegative(t *testing.T) {
assert.False(t, WildcardMatchCaseInsensitive("foo", "bar"))
}
func TestSingleWildcardPositive(t *testing.T) {
assert.True(t, WildcardMatchCaseInsensitive("fo?", "foo"))
}
func TestSingleWildcardNegative(t *testing.T) {
assert.False(t, WildcardMatchCaseInsensitive("f?o", "boo"))
}
func TestMultipleWildcardPositive(t *testing.T) {
assert.True(t, WildcardMatchCaseInsensitive("?o?", "foo"))
}
func TestMultipleWildcardNegative(t *testing.T) {
assert.False(t, WildcardMatchCaseInsensitive("f??", "boo"))
}
func TestGlobPositive(t *testing.T) {
assert.True(t, WildcardMatchCaseInsensitive("*oo", "foo"))
}
func TestGlobPositiveZeroOrMore(t *testing.T) {
assert.True(t, WildcardMatchCaseInsensitive("foo*", "foo"))
}
func TestGlobNegativeZeroOrMore(t *testing.T) {
assert.False(t, WildcardMatchCaseInsensitive("foo*", "fo0"))
}
func TestGlobNegative(t *testing.T) {
assert.False(t, WildcardMatchCaseInsensitive("fo*", "boo"))
}
func TestGlobAndSinglePositive(t *testing.T) {
assert.True(t, WildcardMatchCaseInsensitive("*o?", "foo"))
}
func TestGlobAndSingleNegative(t *testing.T) {
assert.False(t, WildcardMatchCaseInsensitive("f?*", "boo"))
}
func TestPureWildcard(t *testing.T) {
assert.True(t, WildcardMatchCaseInsensitive("*", "boo"))
}
func TestMisc(t *testing.T) {
animal1 := "?at"
animal2 := "?o?se"
animal3 := "*s"
vehicle1 := "J*"
vehicle2 := "????"
assert.True(t, WildcardMatchCaseInsensitive(animal1, "bat"))
assert.True(t, WildcardMatchCaseInsensitive(animal1, "cat"))
assert.True(t, WildcardMatchCaseInsensitive(animal2, "horse"))
assert.True(t, WildcardMatchCaseInsensitive(animal2, "mouse"))
assert.True(t, WildcardMatchCaseInsensitive(animal3, "dogs"))
assert.True(t, WildcardMatchCaseInsensitive(animal3, "horses"))
assert.True(t, WildcardMatchCaseInsensitive(vehicle1, "Jeep"))
assert.True(t, WildcardMatchCaseInsensitive(vehicle2, "ford"))
assert.False(t, WildcardMatchCaseInsensitive(vehicle2, "chevy"))
assert.True(t, WildcardMatchCaseInsensitive("*", "cAr"))
assert.True(t, WildcardMatchCaseInsensitive("*/foo", "/bar/foo"))
}
func TestCaseInsensitivity(t *testing.T) {
assert.True(t, WildcardMatch("Foo", "Foo", false))
assert.True(t, WildcardMatch("Foo", "Foo", true))
assert.False(t, WildcardMatch("Foo", "FOO", false))
assert.True(t, WildcardMatch("Foo", "FOO", true))
assert.True(t, WildcardMatch("Fo*", "Foo0", false))
assert.True(t, WildcardMatch("Fo*", "Foo0", true))
assert.False(t, WildcardMatch("Fo*", "FOo0", false))
assert.True(t, WildcardMatch("Fo*", "FOO0", true))
assert.True(t, WildcardMatch("Fo?", "Foo", false))
assert.True(t, WildcardMatch("Fo?", "Foo", true))
assert.False(t, WildcardMatch("Fo?", "FOo", false))
assert.True(t, WildcardMatch("Fo?", "FoO", false))
assert.True(t, WildcardMatch("Fo?", "FOO", true))
}
func TestLongStrings(t *testing.T) {
chars := []byte{'a', 'b', 'c', 'd'}
text := bytes.NewBufferString("a")
for i := 0; i < 8192; i++ {
text.WriteString(string(chars[rand.Intn(len(chars))]))
}
text.WriteString("b")
assert.True(t, WildcardMatchCaseInsensitive("a*b", text.String()))
}
func TestNoGlobs(t *testing.T) {
assert.False(t, WildcardMatchCaseInsensitive("abcd", "abc"))
}
func TestEdgeCaseGlobs(t *testing.T) {
assert.True(t, WildcardMatchCaseInsensitive("", ""))
assert.True(t, WildcardMatchCaseInsensitive("a", "a"))
assert.True(t, WildcardMatchCaseInsensitive("*a", "a"))
assert.True(t, WildcardMatchCaseInsensitive("*a", "ba"))
assert.True(t, WildcardMatchCaseInsensitive("a*", "a"))
assert.True(t, WildcardMatchCaseInsensitive("a*", "ab"))
assert.True(t, WildcardMatchCaseInsensitive("a*a", "aa"))
assert.True(t, WildcardMatchCaseInsensitive("a*a", "aba"))
assert.True(t, WildcardMatchCaseInsensitive("a*a", "aaa"))
assert.True(t, WildcardMatchCaseInsensitive("a*a*", "aa"))
assert.True(t, WildcardMatchCaseInsensitive("a*a*", "aba"))
assert.True(t, WildcardMatchCaseInsensitive("a*a*", "aaa"))
assert.True(t, WildcardMatchCaseInsensitive("a*a*", "aaaaaaaaaaaaaaaaaaaaaaa"))
assert.True(t, WildcardMatchCaseInsensitive("a*b*a*b*a*b*a*b*a*",
"akljd9gsdfbkjhaabajkhbbyiaahkjbjhbuykjakjhabkjhbabjhkaabbabbaaakljdfsjklababkjbsdabab"))
assert.False(t, WildcardMatchCaseInsensitive("a*na*ha", "anananahahanahana"))
}
func TestMultiGlobs(t *testing.T) {
assert.True(t, WildcardMatchCaseInsensitive("*a", "a"))
assert.True(t, WildcardMatchCaseInsensitive("**a", "a"))
assert.True(t, WildcardMatchCaseInsensitive("***a", "a"))
assert.True(t, WildcardMatchCaseInsensitive("**a*", "a"))
assert.True(t, WildcardMatchCaseInsensitive("**a**", "a"))
assert.True(t, WildcardMatchCaseInsensitive("a**b", "ab"))
assert.True(t, WildcardMatchCaseInsensitive("a**b", "abb"))
assert.True(t, WildcardMatchCaseInsensitive("*?", "a"))
assert.True(t, WildcardMatchCaseInsensitive("*?", "aa"))
assert.True(t, WildcardMatchCaseInsensitive("*??", "aa"))
assert.False(t, WildcardMatchCaseInsensitive("*???", "aa"))
assert.True(t, WildcardMatchCaseInsensitive("*?", "aaa"))
assert.True(t, WildcardMatchCaseInsensitive("?", "a"))
assert.False(t, WildcardMatchCaseInsensitive("??", "a"))
assert.True(t, WildcardMatchCaseInsensitive("?*", "a"))
assert.True(t, WildcardMatchCaseInsensitive("*?", "a"))
assert.False(t, WildcardMatchCaseInsensitive("?*?", "a"))
assert.True(t, WildcardMatchCaseInsensitive("?*?", "aa"))
assert.True(t, WildcardMatchCaseInsensitive("*?*", "a"))
assert.False(t, WildcardMatchCaseInsensitive("*?*a", "a"))
assert.True(t, WildcardMatchCaseInsensitive("*?*a*", "ba"))
}
// Benchmark
func BenchmarkWildcardMatch(b *testing.B) {
for i := 0; i < b.N; i++ {
WildcardMatch("*?", "aa", true)
}
}
| 189 |
aws-xray-sdk-go | aws | Go | // Code generated by go-bindata. DO NOT EDIT.
// sources:
// resources/AWSWhitelist.json (11.191kB)
// resources/DefaultSamplingRules.json (97B)
// resources/ExampleSamplingRules.json (609B)
package resources
import (
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("read %q: %v", name, err)
}
if clErr != nil {
return nil, err
}
return buf.Bytes(), nil
}
type asset struct {
bytes []byte
info os.FileInfo
digest [sha256.Size]byte
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (fi bindataFileInfo) Name() string {
return fi.name
}
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
return false
}
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _resourcesAwswhitelistJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x59\x4b\x73\xe2\x38\x10\xbe\xf3\x2b\x5c\x3e\xa7\xf6\xb2\xb7\xdc\x18\xf2\xa8\xd4\x92\x09\x09\xcc\xe6\xb0\xb5\x45\xc9\x52\xe3\x68\x62\x4b\x8e\x1e\x4c\xa8\xad\xfc\xf7\x2d\x49\x86\x80\x31\xb6\x90\x43\x86\x64\xe6\x30\x35\xc1\x6a\xab\xbf\xfe\xfa\xa1\x6e\xf9\xbf\x5e\x14\xc5\x12\xc4\x9c\x62\x90\xf1\x69\x64\x7e\x47\x51\x4c\x16\x0c\xe5\x9c\x24\xab\x27\x51\x14\xf3\x02\x04\x52\x94\x33\xb9\xf6\x34\x8a\xe2\x2f\x48\xe1\x87\x4b\x50\x57\x0a\xf2\x8d\x95\x28\x8a\x05\x3c\x69\x90\x6a\x4a\x40\x62\x41\x0b\xc5\x85\xac\x88\x44\x51\x7c\xe7\x84\xcc\xfb\xdb\xab\x51\x14\xe7\xa8\x88\x4f\x23\x25\x34\x9c\x54\x97\x52\x50\xd3\x47\x58\xc8\x5d\xeb\x02\x18\xca\x61\xaa\x78\x7c\x1a\xc5\x0a\x25\x19\x4c\xcd\x03\x19\x6f\x08\xbe\xac\xfd\x7a\x39\xd9\xc4\x2f\x0b\xce\x24\x4c\x0b\x24\x50\x0e\x0a\x2c\xfe\x7f\x36\xf1\x0f\x38\x93\x3a\x07\x32\x40\x05\xc2\x54\x2d\xd6\x37\xff\xb7\x57\xb3\xb1\xa3\xec\x5e\x50\x05\xbf\x49\x5b\x92\xb6\x09\x23\x36\x86\x0d\x78\x96\x01\x36\x21\x77\x0d\x4a\x50\x2c\xdb\x99\x1d\x08\x40\x0a\x26\x06\xf4\x2e\x5a\x9b\x50\x5d\x66\x3c\x41\xd9\x18\x30\x67\x04\x89\xc5\x15\x23\xf0\x0c\xb2\x8a\x6d\xc8\x71\xbb\xd0\x48\xf0\x39\x95\x94\x33\x20\x93\x07\xc1\x75\xfa\x50\x68\x55\x15\xb2\x40\xbf\xa2\x1c\xda\x2d\x3b\x83\x0c\x9a\xe3\xa5\xc9\xb0\x7a\x45\x47\xe3\x36\x67\x5c\xb0\xdb\xf6\xa2\xd1\x24\x55\xf2\x2e\xba\x5a\x8a\x62\x1b\xcf\x54\x2a\x60\xea\x0e\x10\xa9\x89\xad\xef\x8e\xe1\xf3\xe7\x42\x80\x34\x61\xe6\x17\x5a\xef\x51\xdd\x86\x54\x2a\xab\xbd\x5a\x98\xbc\x2c\x3f\x7f\xc6\x99\x96\x74\x0e\x63\x85\x84\x7a\xb5\xa2\x9a\x83\x34\xa7\xca\xc3\xb4\xc6\x22\xba\xda\xbd\xb6\x84\x66\x54\xaa\xa6\x1a\x8a\xb9\x66\x3b\x05\x6a\x8a\xa8\x93\xdf\x5d\x44\x6b\xc9\x1c\xe9\xe0\x18\x3a\xf2\x9c\xbf\xd5\x20\x16\x21\x76\xf5\x95\x12\x34\xd1\x0a\xe4\x84\x5f\xc2\x56\x4d\x6d\x4e\x1d\x5b\xad\x77\x07\x54\x40\xa2\x8d\x31\x62\x76\xd7\x0b\x2e\x7e\x20\xb1\xa5\x71\x0c\x86\x99\xe3\x49\x4f\x83\xf7\x33\xd0\x0e\x69\x0e\x6c\xeb\xd5\x36\xb6\xab\x0b\x5c\x99\xa3\xdc\x6e\x25\x0f\x9c\x27\x03\x9b\xff\x35\xd1\xc3\x80\x0c\xaa\xb5\xa1\xde\x77\xdf\x0a\x82\x3e\x6d\x13\xe0\x8c\x0b\x3e\x98\x57\xf1\x79\x06\x33\xca\xa8\x9b\x54\x4e\xda\xfb\x3b\xa7\xf6\x30\xfd\x5b\x6f\xfd\xff\xd2\xd6\x58\x3e\x49\x9f\xb9\xaa\x4f\xc8\x08\x44\x4e\x5d\xf4\x07\x10\x32\x44\x09\x64\x55\xc4\xb7\x1a\x34\x7c\x13\x99\x47\x2b\xfd\x80\x58\x0a\xd7\x20\x25\x4a\xe1\x6f\x2a\x69\x42\x33\xe3\xec\x00\x24\x2b\xa5\x15\x30\xaf\xbb\x4e\x68\x0e\x5c\x7b\x64\xc0\x0e\x54\x76\xa2\xea\x04\xad\x5b\x4a\x5c\x20\x9a\x01\xf1\x9d\x4f\xac\xd2\x6e\x35\xb8\xd6\xaf\xfb\x4c\x12\x25\x85\x6f\xc6\x59\xbb\xaa\x8f\xe2\x23\x07\x3a\xd8\x47\xfe\xec\x5c\x82\xb2\xc2\x6b\x5e\xfd\x79\xec\xac\x81\xf0\x06\x6e\xb4\x86\x22\xae\x3b\x8d\xed\xc2\xcd\x0f\x06\xa2\x7f\x3f\xee\x63\xdb\x30\x5f\x91\x8e\x86\xf9\xfb\xc3\x0c\x2e\x67\x80\xc8\x10\x94\x02\x31\xe6\x5a\x60\x17\x06\x3f\xd3\x2f\xcb\x6d\x3c\xdc\x62\xf0\x77\xc4\x6b\xbc\x32\x12\x30\xa3\xcf\x5d\x67\xab\x57\xdc\x07\x1d\xad\x9e\x8c\x9a\xd0\xd1\x4a\xa4\xef\x91\xe6\x77\x80\x81\xce\xbb\x14\xdc\x55\x6e\xba\x59\xb5\x92\x35\xd7\xe8\xf9\xab\xce\x13\x10\x37\xb3\x52\xc7\xb6\x88\x7b\xde\xbc\x8f\xff\x29\x5d\x11\xb8\x47\x54\x99\x25\xd7\x57\xf9\xb4\xd1\x8d\x91\xb3\x32\xe2\xa0\x81\x93\x3b\x2d\x61\xa1\x73\x07\x39\x9f\x43\xb7\xf6\xcc\x3f\x80\xc6\xc0\x48\x87\xe8\x39\x83\x0c\x2d\x96\xbe\xf1\x69\x07\x4f\xea\x34\xf8\xb8\x6c\xe7\x41\x66\x85\xde\xea\x4a\x7a\xe9\x39\xb4\xd4\x76\x80\xeb\xe9\xd2\xa0\x2b\x8f\x76\x61\xcd\x3b\x07\xed\x70\x3c\xdc\x70\xce\x94\xa0\xc7\x92\x38\x01\x99\x5f\x76\x68\x07\x85\x3f\xb3\x3a\x6a\xd1\x57\x47\x73\x8d\x31\x48\x39\xd3\xd5\x26\xe7\x8d\x11\x
c9\x95\x9e\xb0\x62\x34\x7e\xb7\x36\xd2\x23\x04\xdf\xa7\x04\xf8\xa7\x7e\xaf\xfa\xd7\xe6\x2c\x9e\xa1\x3c\x21\xc8\x67\x1c\xbf\x62\x73\xfe\x18\x54\x7f\x2f\x34\xb3\x17\x21\x75\x1d\xaf\xd9\x15\x5b\x75\x93\x45\xb1\x7d\x39\xc6\xd3\xba\xc7\xb7\x1a\x65\x74\x46\x41\x74\x1d\x88\x4a\x60\xe7\x42\x70\xb1\x75\x31\xa5\x90\xd2\x72\xc0\x89\xc7\x44\xe9\xb8\xe9\xcb\x05\xc3\x9d\x09\xea\x66\x92\x43\xbd\xd7\x6d\xcc\x9f\x3e\xde\x77\x73\xfb\x17\x8d\x1f\x41\x05\x7d\xaf\xad\x7d\xd5\xae\xcc\x51\x66\x3b\xd0\xf6\xa8\x4f\xec\x1e\x36\xe4\xf7\xad\x11\x6e\xa6\xfd\xb8\xf8\x2f\x41\xdd\x24\xdf\x01\xef\x04\xdf\x14\x14\x7f\xc1\xa2\x63\x55\x3b\x0a\xef\xfd\x1a\x04\xec\x9f\xf3\xe5\x25\x13\x12\x8f\x95\x8a\xd8\xca\xe5\xce\x83\xf2\xa8\xb9\x68\x04\xe7\xcc\x3b\x70\x0f\x45\x9c\x92\x29\xb7\x2c\x86\x75\x2d\x43\x2a\x95\x63\xb2\xc6\x0b\x3e\x96\xd6\xbf\xfc\xc6\x96\x96\x7e\x0a\xb6\xf0\xd3\xc6\xd9\x80\x33\x65\xbf\x99\x1d\x94\xfe\x2e\x01\x36\xd2\xbf\xf2\x91\x31\xd2\x65\x76\x0d\x79\x9a\x52\x96\x1e\x79\x04\x36\xdb\x30\xe2\x19\xc5\x41\x5f\x9e\xca\x37\x3f\x87\x2b\x27\xe8\x63\xb9\xb2\xbe\xdf\x66\x5e\x5f\x3f\x47\x3a\xc9\xa8\x0c\xba\x4c\x99\xf0\x82\xe2\xbe\x60\xfb\x4c\x01\x42\x33\x45\x73\xf8\x43\xa2\x14\x72\x64\x1a\x09\xef\x91\xf0\x9c\x91\x82\x53\x16\x54\x66\x96\xef\x7a\x7f\x42\xee\x99\x7f\x2f\xbd\xff\x03\x00\x00\xff\xff\x0e\x2e\x89\x4c\xb7\x2b\x00\x00")
func resourcesAwswhitelistJsonBytes() ([]byte, error) {
return bindataRead(
_resourcesAwswhitelistJson,
"resources/AWSWhitelist.json",
)
}
func resourcesAwswhitelistJson() (*asset, error) {
bytes, err := resourcesAwswhitelistJsonBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "resources/AWSWhitelist.json", size: 11191, mode: os.FileMode(0644), modTime: time.Unix(1573699810, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc0, 0xee, 0x6f, 0x3d, 0xd1, 0x64, 0x48, 0xfb, 0xd4, 0xf7, 0x9, 0xda, 0x78, 0xfe, 0x46, 0xd, 0x67, 0xa6, 0xb3, 0xa7, 0x22, 0x68, 0xee, 0x2, 0x3c, 0x30, 0xe4, 0x51, 0xa6, 0xe1, 0x9b, 0x5a}}
return a, nil
}
var _resourcesDefaultsamplingrulesJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xaa\xe6\x52\x50\x50\x2a\x4b\x2d\x2a\xce\xcc\xcf\x53\xb2\x52\x30\xd2\x01\xf1\x53\x52\xd3\x12\x4b\x73\x4a\x94\xac\x14\x40\xd2\x0a\x0a\x4a\x69\x99\x15\xa9\x29\xf1\x25\x89\x45\xe9\xa9\x20\x51\x43\x1d\x88\x70\x51\x62\x49\xaa\x92\x95\x82\x81\x9e\x81\x29\x97\x82\x42\x2d\x58\x6f\x51\x69\x4e\x6a\xb1\x92\x95\x42\x34\x97\x82\x42\x2c\x57\x2d\x17\x20\x00\x00\xff\xff\x47\xbd\xc4\xe0\x61\x00\x00\x00")
func resourcesDefaultsamplingrulesJsonBytes() ([]byte, error) {
return bindataRead(
_resourcesDefaultsamplingrulesJson,
"resources/DefaultSamplingRules.json",
)
}
func resourcesDefaultsamplingrulesJson() (*asset, error) {
bytes, err := resourcesDefaultsamplingrulesJsonBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "resources/DefaultSamplingRules.json", size: 97, mode: os.FileMode(0644), modTime: time.Unix(1573698794, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xce, 0x2c, 0x7, 0xe9, 0x6a, 0x70, 0x3e, 0xab, 0x1, 0xcb, 0x1d, 0xc, 0x29, 0x18, 0x89, 0xf0, 0x34, 0xa, 0x26, 0x20, 0x2a, 0x2c, 0x60, 0xc3, 0xe8, 0x8c, 0x44, 0x23, 0xf8, 0xb9, 0x27, 0x69}}
return a, nil
}
var _resourcesExamplesamplingrulesJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x64\x90\xcd\xae\xd3\x30\x14\x84\xf7\x79\x8a\x91\x37\x17\xae\x72\x43\x8b\xc4\x26\x3b\x16\xbc\x00\x62\x87\x50\x74\x1a\x9f\xd4\x47\x38\x76\xea\x9f\xb6\x08\xf5\xdd\x91\x9d\x96\xd2\xb2\xcc\xcc\x38\x67\xbe\xf9\xdd\x00\xea\xc8\x21\x8a\x77\xaa\xc7\xc7\xb6\x7c\x6b\x9e\x28\xdb\xa4\x7a\x14\xbb\x0a\x71\x0c\xb2\xa4\x35\xa4\x3e\xe3\x9a\x40\xc8\x96\x5b\x50\x84\xb8\xd1\x66\xcd\x1a\x3b\xb6\xfe\xd4\x42\x22\x02\x1f\xb2\x04\xd6\x10\x07\x72\xbf\x10\x69\x5e\xac\xb8\x7d\x7d\x14\x31\x89\xe5\x0e\xef\x22\x87\xa3\x8c\x3c\x38\x9a\xb9\x85\x49\x69\x19\x66\x4e\xc6\xeb\x16\xe4\x34\x72\xb0\xc3\x42\xc9\x80\x02\x63\x92\x33\x6b\x24\x8f\x97\xd7\x17\x4c\x3e\x20\x99\x72\x27\x5b\xee\xde\xab\x76\xad\x5a\x33\x43\xa2\xb0\xe7\x02\xb0\xbd\xca\x81\x12\xab\x1e\x9b\x6e\xf3\xa9\x01\x2e\x15\xb3\xf6\x50\x3d\xbe\xd7\xc8\x8a\xfa\x3f\xec\x97\x73\xe9\xcd\x28\x2d\xde\x76\x14\x59\xd7\x8b\x2b\x68\x87\xaf\x15\xa6\xb4\xe3\x23\xd9\x4c\x69\x05\x16\xfd\xe6\x83\xe6\xd0\x22\x19\x7e\x98\x0b\x27\xb1\x16\x3b\x46\x2e\xbf\x92\x09\xce\x3b\xc6\x4c\x69\x34\x35\x2b\x6e\xf4\x73\x9d\x89\x0f\x99\x63\xea\xf0\xad\x50\x4a\x04\xad\xef\x57\x70\xc6\x68\x78\xfc\xe9\x73\xc2\x42\x7b\xee\xae\xfc\x80\x12\x5d\x6a\x6f\xef\x82\xf1\xb1\x4c\xa1\x5e\xff\x91\xee\x3b\x3f\x39\xb7\xc1\x8b\xfc\xe1\x76\xe2\x6e\x3f\xcf\xbb\xf9\xeb\x3c\x2e\x0c\x5c\x1a\xe0\x47\x73\xf9\x13\x00\x00\xff\xff\x8e\x14\x8f\x3e\x61\x02\x00\x00")
func resourcesExamplesamplingrulesJsonBytes() ([]byte, error) {
return bindataRead(
_resourcesExamplesamplingrulesJson,
"resources/ExampleSamplingRules.json",
)
}
func resourcesExamplesamplingrulesJson() (*asset, error) {
bytes, err := resourcesExamplesamplingrulesJsonBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "resources/ExampleSamplingRules.json", size: 609, mode: os.FileMode(0644), modTime: time.Unix(1573698794, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd2, 0xb2, 0x3e, 0xf4, 0x1b, 0x71, 0xde, 0x47, 0x26, 0x3d, 0xba, 0xd4, 0x75, 0xf1, 0xb2, 0x19, 0x63, 0x88, 0x1d, 0x1b, 0x8b, 0xda, 0x21, 0xaf, 0x67, 0xcd, 0x20, 0x27, 0x59, 0x9c, 0xe1, 0x6c}}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
data, err := Asset(name)
return string(data), err
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
return string(MustAsset(name))
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
}
return a.digest, nil
}
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}
// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
mp := make(map[string][sha256.Size]byte, len(_bindata))
for name := range _bindata {
a, err := _bindata[name]()
if err != nil {
return nil, err
}
mp[name] = a.digest
}
return mp, nil
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"resources/AWSWhitelist.json": resourcesAwswhitelistJson,
"resources/DefaultSamplingRules.json": resourcesDefaultsamplingrulesJson,
"resources/ExampleSamplingRules.json": resourcesExamplesamplingrulesJson,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
canonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(canonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"resources": &bintree{nil, map[string]*bintree{
"AWSWhitelist.json": &bintree{resourcesAwswhitelistJson, map[string]*bintree{}},
"DefaultSamplingRules.json": &bintree{resourcesDefaultsamplingrulesJson, map[string]*bintree{}},
"ExampleSamplingRules.json": &bintree{resourcesExamplesamplingrulesJson, map[string]*bintree{}},
}},
}}
// RestoreAsset restores an asset under the given directory.
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively.
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
func _filePath(dir, name string) string {
canonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}
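// exampleLoadEmbeddedRules is an illustrative sketch, not part of the generated file:
// it loads one of the embedded documents registered in _bindata by its canonical name.
func exampleLoadEmbeddedRules() []byte {
	return MustAsset("resources/DefaultSamplingRules.json")
}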
| 320 |
aws-xray-sdk-go | aws | Go | package main
import (
"context"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-xray-sdk-go/xray"
"golang.org/x/net/context/ctxhttp"
"log"
"net/http"
"os"
)
func webServer() {
http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte("healthcheck"))
}))
//test http instrumentation
http.Handle("/outgoing-http-call", xray.Handler(xray.NewFixedSegmentNamer("/outgoing-http-call"), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := ctxhttp.Get(r.Context(), xray.Client(nil), "https://aws.amazon.com")
if err != nil {
log.Println(err)
return
}
_, _ = w.Write([]byte("Tracing http call!"))
})))
//test aws sdk instrumentation
http.Handle("/aws-sdk-call", xray.Handler(xray.NewFixedSegmentNamer("/aws-sdk-call"), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
testAWSCalls(r.Context())
_, _ = w.Write([]byte("Tracing aws sdk call!"))
})))
listenAddress := os.Getenv("LISTEN_ADDRESS")
if listenAddress == "" {
listenAddress = "127.0.0.1:5000"
}
	log.Printf("App is listening on %s !", listenAddress)
	if err := http.ListenAndServe(listenAddress, nil); err != nil {
		log.Fatal(err)
	}
}
func testAWSCalls(ctx context.Context) {
awsSess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"),
	}))
s3Client := s3.New(awsSess)
xray.AWS(s3Client.Client)
if _, err := s3Client.ListBucketsWithContext(ctx, nil); err != nil {
log.Println(err)
return
}
log.Println("Successfully traced aws sdk call")
}
func main() {
webServer()
}
| 60 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
// Package ctxmissing provides control over
// the behavior of the X-Ray SDK when subsegments
// are created without a provided parent segment.
package ctxmissing
// Strategy provides an interface for
// implementing context missing strategies.
type Strategy interface {
ContextMissing(v interface{})
}
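// countingStrategy is an illustrative sketch, not part of the original file: any type
// with a ContextMissing method satisfies Strategy, so a custom implementation can, for
// example, count missing-context events instead of panicking or logging.
type countingStrategy struct {
	missed int
}

// ContextMissing records the event without taking any other action.
func (c *countingStrategy) ContextMissing(v interface{}) {
	c.missed++
}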
| 19 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package ctxmissing
import (
"bytes"
"strings"
"testing"
"github.com/aws/aws-xray-sdk-go/internal/logger"
"github.com/aws/aws-xray-sdk-go/xraylog"
"github.com/stretchr/testify/assert"
)
func TestDefaultRuntimeErrorStrategy(t *testing.T) {
defer func() {
if p := recover(); p != nil {
assert.Equal(t, "TestRuntimeError", p.(string))
}
}()
r := NewDefaultRuntimeErrorStrategy()
r.ContextMissing("TestRuntimeError")
}
func TestDefaultLogErrorStrategy(t *testing.T) {
oldLogger := logger.Logger
defer func() { logger.Logger = oldLogger }()
var buf bytes.Buffer
logger.Logger = xraylog.NewDefaultLogger(&buf, xraylog.LogLevelDebug)
l := NewDefaultLogErrorStrategy()
l.ContextMissing("TestLogError")
assert.True(t, strings.Contains(buf.String(), "Suppressing AWS X-Ray context missing panic: TestLogError"))
}
func TestDefaultIgnoreErrorStrategy(t *testing.T) {
defer func() {
p := recover()
assert.Equal(t, p, nil)
}()
r := NewDefaultIgnoreErrorStrategy()
r.ContextMissing("TestIgnoreError")
}
| 51 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package ctxmissing
import "github.com/aws/aws-xray-sdk-go/internal/logger"
// RuntimeErrorStrategy provides the AWS_XRAY_CONTEXT_MISSING
// environment variable value for enabling the runtime error
// context missing strategy (panic).
var RuntimeErrorStrategy = "RUNTIME_ERROR"
// LogErrorStrategy provides the AWS_XRAY_CONTEXT_MISSING
// environment variable value for enabling the log error
// context missing strategy.
var LogErrorStrategy = "LOG_ERROR"
// IgnoreErrorStrategy provides the AWS_XRAY_CONTEXT_MISSING
// environment variable value for enabling the ignore error
// context missing strategy.
var IgnoreErrorStrategy = "IGNORE_ERROR"
// DefaultRuntimeErrorStrategy implements the
// runtime error context missing strategy.
type DefaultRuntimeErrorStrategy struct{}
// DefaultLogErrorStrategy implements the
// log error context missing strategy.
type DefaultLogErrorStrategy struct{}
// DefaultIgnoreErrorStrategy implements the
// ignore error context missing strategy.
type DefaultIgnoreErrorStrategy struct{}
// NewDefaultRuntimeErrorStrategy initializes
// an instance of DefaultRuntimeErrorStrategy.
func NewDefaultRuntimeErrorStrategy() *DefaultRuntimeErrorStrategy {
return &DefaultRuntimeErrorStrategy{}
}
// NewDefaultLogErrorStrategy initializes
// an instance of DefaultLogErrorStrategy.
func NewDefaultLogErrorStrategy() *DefaultLogErrorStrategy {
return &DefaultLogErrorStrategy{}
}
// NewDefaultIgnoreErrorStrategy initializes
// an instance of DefaultIgnoreErrorStrategy.
func NewDefaultIgnoreErrorStrategy() *DefaultIgnoreErrorStrategy {
return &DefaultIgnoreErrorStrategy{}
}
// ContextMissing panics when the segment context is missing.
func (dr *DefaultRuntimeErrorStrategy) ContextMissing(v interface{}) {
panic(v)
}
// ContextMissing logs an error message when the
// segment context is missing.
func (dl *DefaultLogErrorStrategy) ContextMissing(v interface{}) {
logger.Errorf("Suppressing AWS X-Ray context missing panic: %v", v)
}
// ContextMissing ignores an error message when the
// segment context is missing.
func (di *DefaultIgnoreErrorStrategy) ContextMissing(v interface{}) {
// do nothing
}
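// strategyForValue is an illustrative sketch, not part of the original file: it maps an
// AWS_XRAY_CONTEXT_MISSING value (read by the caller, e.g. via os.Getenv) to one of the
// default strategies above, falling back to the log-error strategy for unknown values.
func strategyForValue(value string) Strategy {
	switch value {
	case RuntimeErrorStrategy:
		return NewDefaultRuntimeErrorStrategy()
	case IgnoreErrorStrategy:
		return NewDefaultIgnoreErrorStrategy()
	default:
		return NewDefaultLogErrorStrategy()
	}
}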
| 74 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package exception
import (
"bytes"
"crypto/rand"
goerrors "errors"
"fmt"
"runtime"
"strings"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/pkg/errors"
)
// StackTracer is an interface for implementing StackTrace method.
type StackTracer interface {
StackTrace() []uintptr
}
// Exception provides the shape for unmarshalling an exception.
type Exception struct {
ID string `json:"id,omitempty"`
Type string `json:"type,omitempty"`
Message string `json:"message,omitempty"`
Stack []Stack `json:"stack,omitempty"`
Remote bool `json:"remote,omitempty"`
}
// Stack provides the shape for unmarshalling a stack frame.
type Stack struct {
Path string `json:"path,omitempty"`
Line int `json:"line,omitempty"`
Label string `json:"label,omitempty"`
}
// MultiError is a type for a slice of error.
type MultiError []error
// Error returns a string that concatenates the messages of all wrapped errors.
func (e MultiError) Error() string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "%d errors occurred:\n", len(e))
for _, err := range e {
buf.WriteString("* ")
buf.WriteString(err.Error())
buf.WriteByte('\n')
}
return buf.String()
}
var defaultErrorFrameCount = 32
// DefaultFormattingStrategy is the default implementation of
// the ExceptionFormattingStrategy and has a configurable frame count.
type DefaultFormattingStrategy struct {
FrameCount int
}
// NewDefaultFormattingStrategy initializes DefaultFormattingStrategy
// with default value of frame count.
func NewDefaultFormattingStrategy() (*DefaultFormattingStrategy, error) {
return &DefaultFormattingStrategy{FrameCount: defaultErrorFrameCount}, nil
}
// NewDefaultFormattingStrategyWithDefinedErrorFrameCount initializes
// DefaultFormattingStrategy with customer defined frame count.
func NewDefaultFormattingStrategyWithDefinedErrorFrameCount(frameCount int) (*DefaultFormattingStrategy, error) {
if frameCount > 32 || frameCount < 0 {
		return nil, errors.New("frameCount must be a non-negative integer and less than or equal to 32")
}
return &DefaultFormattingStrategy{FrameCount: frameCount}, nil
}
// Error returns an XRayError containing the given message and the current call stack.
func (dEFS *DefaultFormattingStrategy) Error(message string) *XRayError {
s := make([]uintptr, dEFS.FrameCount)
n := runtime.Callers(2, s)
s = s[:n]
return &XRayError{
Type: "error",
Message: message,
Stack: s,
}
}
// Errorf formats according to a format specifier and returns the resulting XRayError.
func (dEFS *DefaultFormattingStrategy) Errorf(formatString string, args ...interface{}) *XRayError {
e := dEFS.Error(fmt.Sprintf(formatString, args...))
e.Stack = e.Stack[1:]
return e
}
// Panic returns an XRayError of type "panic" whose stack trace starts at the panicking call site.
func (dEFS *DefaultFormattingStrategy) Panic(message string) *XRayError {
e := dEFS.Error(message)
e.Type = "panic"
e.Stack = filterPanicStack(e.Stack)
return e
}
// Panicf formats according to a format specifier and returns the resulting XRayError of type "panic".
func (dEFS *DefaultFormattingStrategy) Panicf(formatString string, args ...interface{}) *XRayError {
e := dEFS.Panic(fmt.Sprintf(formatString, args...))
return e
}
// ExceptionFromError converts an error into an Exception, deriving its type, message, remote flag, and stack trace.
func (dEFS *DefaultFormattingStrategy) ExceptionFromError(err error) Exception {
var isRemote bool
var reqErr awserr.RequestFailure
if goerrors.As(err, &reqErr) {
// A service error occurs
if reqErr.RequestID() != "" {
isRemote = true
}
}
// Fetches type from err
t := fmt.Sprintf("%T", err)
// normalize the type
t = strings.Replace(t, "*", "", -1)
e := Exception{
ID: newExceptionID(),
Type: t,
Message: err.Error(),
Remote: isRemote,
}
xRayErr := &XRayError{}
if goerrors.As(err, &xRayErr) {
e.Type = xRayErr.Type
}
var s []uintptr
// This is our publicly supported interface for passing along stack traces
var st StackTracer
if goerrors.As(err, &st) {
s = st.StackTrace()
}
// We also accept github.com/pkg/errors style stack traces for ease of use
var est interface {
StackTrace() errors.StackTrace
}
if goerrors.As(err, &est) {
for _, frame := range est.StackTrace() {
s = append(s, uintptr(frame))
}
}
if s == nil {
s = make([]uintptr, dEFS.FrameCount)
n := runtime.Callers(5, s)
s = s[:n]
}
e.Stack = convertStack(s)
return e
}
func newExceptionID() string {
var r [8]byte
_, err := rand.Read(r[:])
if err != nil {
panic(err)
}
return fmt.Sprintf("%02x", r)
}
func filterPanicStack(stack []uintptr) []uintptr {
// filter out frames through the first runtime/panic.go frame
frames := runtime.CallersFrames(stack)
loc := 0
index := 0
d := true
for frame, more := frames.Next(); d; frame, more = frames.Next() {
loc++
path, _, label := parseFrame(frame)
if label == "gopanic" && path == "runtime/panic.go" {
index = loc
break
}
d = more
}
return stack[index:]
}
func convertStack(s []uintptr) []Stack {
var r []Stack
frames := runtime.CallersFrames(s)
d := true
for frame, more := frames.Next(); d; frame, more = frames.Next() {
f := &Stack{}
f.Path, f.Line, f.Label = parseFrame(frame)
r = append(r, *f)
d = more
}
return r
}
func parseFrame(frame runtime.Frame) (string, int, string) {
path, line, label := frame.File, frame.Line, frame.Function
	// Strip GOPATH from path by counting the number of separators in label & path
// For example:
// GOPATH = /home/user
// path = /home/user/src/pkg/sub/file.go
// label = pkg/sub.Type.Method
// We want to set path to:
// pkg/sub/file.go
i := len(path)
for n, g := 0, strings.Count(label, "/")+2; n < g; n++ {
i = strings.LastIndex(path[:i], "/")
if i == -1 {
			// Something went wrong and path has fewer separators than we expected
// Abort and leave i as -1 to counteract the +1 below
break
}
}
path = path[i+1:] // Trim the initial /
// Strip the path from the function name as it's already in the path
label = label[strings.LastIndex(label, "/")+1:]
// Likewise strip the package name
label = label[strings.Index(label, ".")+1:]
return path, line, label
}
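// exampleCaptureException is an illustrative sketch, not part of the original file: it
// shows how a formatting strategy turns an ordinary error into the Exception shape that
// is attached to a segment. The error text is a placeholder.
func exampleCaptureException(dEFS *DefaultFormattingStrategy) Exception {
	err := goerrors.New("placeholder failure")
	return dEFS.ExceptionFromError(err)
}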
| 242 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package exception
// FormattingStrategy provides an interface for implementing methods that format errors and exceptions.
type FormattingStrategy interface {
Error(message string) *XRayError
Errorf(formatString string, args ...interface{}) *XRayError
Panic(message string) *XRayError
Panicf(formatString string, args ...interface{}) *XRayError
ExceptionFromError(err error) Exception
}
| 19 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package exception
import (
"errors"
"testing"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/stretchr/testify/assert"
)
func TestMultiErrorReturnStringFormat(t *testing.T) {
var err MultiError
err = append(err, errors.New("error one"))
err = append(err, errors.New("error two"))
assert.Equal(t, "2 errors occurred:\n* error one\n* error two\n", err.Error())
}
func TestDefaultFormattingStrategyWithInvalidFrameCount(t *testing.T) {
dss, e := NewDefaultFormattingStrategyWithDefinedErrorFrameCount(-1)
ds, err := NewDefaultFormattingStrategyWithDefinedErrorFrameCount(33)
assert.Nil(t, dss)
assert.Nil(t, ds)
	assert.Error(t, e, "frameCount must be a non-negative integer and less than or equal to 32")
	assert.Error(t, err, "frameCount must be a non-negative integer and less than or equal to 32")
}
func TestNewDefaultFormattingStrategyWithValidFrameCount(t *testing.T) {
dss, e := NewDefaultFormattingStrategyWithDefinedErrorFrameCount(10)
assert.Nil(t, e)
assert.Equal(t, 10, dss.FrameCount)
}
func TestError(t *testing.T) {
defs, _ := NewDefaultFormattingStrategy()
err := defs.Error("Test")
stack := convertStack(err.StackTrace())
assert.Equal(t, "Test", err.Error())
assert.Equal(t, "error", err.Type)
assert.Equal(t, "TestError", stack[0].Label)
}
func TestErrorf(t *testing.T) {
defs, _ := NewDefaultFormattingStrategy()
err := defs.Errorf("Test")
stack := convertStack(err.StackTrace())
assert.Equal(t, "Test", err.Error())
assert.Equal(t, "error", err.Type)
assert.Equal(t, "TestErrorf", stack[0].Label)
}
func TestPanic(t *testing.T) {
defs, _ := NewDefaultFormattingStrategy()
var err *XRayError
func() {
defer func() {
err = defs.Panic(recover().(string))
}()
panic("Test")
}()
stack := convertStack(err.StackTrace())
assert.Equal(t, "Test", err.Error())
assert.Equal(t, "panic", err.Type)
assert.Equal(t, "TestPanic.func1", stack[0].Label)
assert.Equal(t, "TestPanic", stack[1].Label)
}
func TestPanicf(t *testing.T) {
defs, _ := NewDefaultFormattingStrategy()
var err *XRayError
func() {
defer func() {
err = defs.Panicf("%v", recover())
}()
panic("Test")
}()
stack := convertStack(err.StackTrace())
assert.Equal(t, "Test", err.Error())
assert.Equal(t, "panic", err.Type)
assert.Equal(t, "TestPanicf.func1", stack[0].Label)
assert.Equal(t, "TestPanicf", stack[1].Label)
}
func TestExceptionFromError(t *testing.T) {
defaultStrategy := &DefaultFormattingStrategy{}
err := defaultStrategy.ExceptionFromError(errors.New("new error"))
assert.NotNil(t, err.ID)
assert.Equal(t, "new error", err.Message)
assert.Equal(t, "errors.errorString", err.Type)
}
func TestExceptionFromErrorRequestFailure(t *testing.T) {
defaultStrategy := &DefaultFormattingStrategy{}
reqErr := awserr.NewRequestFailure(awserr.New("error code", "error message", errors.New("new error")), 400, "1234")
err := defaultStrategy.ExceptionFromError(reqErr)
assert.NotNil(t, err.ID)
assert.Contains(t, err.Message, "new error")
assert.Contains(t, err.Message, "1234")
assert.Equal(t, "awserr.requestError", err.Type)
assert.Equal(t, true, err.Remote)
}
func TestExceptionFromErrorXRayError(t *testing.T) {
defaultStrategy := &DefaultFormattingStrategy{}
xRayErr := defaultStrategy.Error("new XRayError")
err := defaultStrategy.ExceptionFromError(xRayErr)
assert.NotNil(t, err.ID)
assert.Equal(t, "new XRayError", err.Message)
assert.Equal(t, "error", err.Type)
}
// Benchmarks
func BenchmarkDefaultFormattingStrategy_Error(b *testing.B) {
defs, _ := NewDefaultFormattingStrategy()
err := defs.Error("Test")
for i := 0; i < b.N; i++ {
convertStack(err.StackTrace())
}
}
func BenchmarkDefaultFormattingStrategy_ExceptionFromError(b *testing.B) {
defaultStrategy := &DefaultFormattingStrategy{}
err := "new error"
for i := 0; i < b.N; i++ {
defaultStrategy.ExceptionFromError(errors.New(err))
}
}
func BenchmarkDefaultFormattingStrategy_Panic(b *testing.B) {
defs, _ := NewDefaultFormattingStrategy()
for i := 0; i < b.N; i++ {
var err *XRayError
func() {
defer func() {
err = defs.Panic(recover().(string))
}()
panic("Test")
}()
convertStack(err.StackTrace())
}
}
| 167 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package exception
// XRayError records error type, message,
// and a slice of stack frame pointers.
type XRayError struct {
Type string
Message string
Stack []uintptr
}
// Error returns the value of error message.
func (e *XRayError) Error() string {
return e.Message
}
// StackTrace returns the recorded stack as a slice of program counters.
func (e *XRayError) StackTrace() []uintptr {
return e.Stack
}
| 28 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package sampling
import (
crypto "crypto/rand"
"errors"
"fmt"
"strings"
"sync"
"time"
"github.com/aws/aws-xray-sdk-go/daemoncfg"
"github.com/aws/aws-xray-sdk-go/internal/logger"
"github.com/aws/aws-xray-sdk-go/internal/plugins"
"github.com/aws/aws-xray-sdk-go/utils"
xraySvc "github.com/aws/aws-sdk-go/service/xray"
)
// CentralizedStrategy is an implementation of SamplingStrategy. It
// performs quota-based sampling with X-Ray acting as arbitrator for clients.
// It will fall back to LocalizedStrategy if sampling rules are not available from the X-Ray APIs.
type CentralizedStrategy struct {
// List of known centralized sampling rules
manifest *CentralizedManifest
// Sampling strategy used if centralized manifest is expired
fallback *LocalizedStrategy
// XRay service proxy used for getting quotas and sampling rules
proxy svcProxy
// Unique ID used by XRay service to identify this client
clientID string
// Provides system time
clock utils.Clock
// Provides random numbers
rand utils.Rand
	// pollerStart, if true, indicates that the rule and target pollers have been started
pollerStart bool
// represents daemon endpoints
daemonEndpoints *daemoncfg.DaemonEndpoints
mu sync.RWMutex
}
// svcProxy is the interface for API calls to X-Ray service.
type svcProxy interface {
GetSamplingTargets(s []*xraySvc.SamplingStatisticsDocument) (*xraySvc.GetSamplingTargetsOutput, error)
GetSamplingRules() ([]*xraySvc.SamplingRuleRecord, error)
}
// NewCentralizedStrategy creates a centralized sampling strategy with a fallback on
// local default rule.
func NewCentralizedStrategy() (*CentralizedStrategy, error) {
fb, err := NewLocalizedStrategy()
if err != nil {
return nil, err
}
return newCentralizedStrategy(fb)
}
// NewCentralizedStrategyWithJSONBytes creates a centralized sampling strategy with a fallback on
// local rules specified in the given byte slice.
func NewCentralizedStrategyWithJSONBytes(b []byte) (*CentralizedStrategy, error) {
fb, err := NewLocalizedStrategyFromJSONBytes(b)
if err != nil {
return nil, err
}
return newCentralizedStrategy(fb)
}
// NewCentralizedStrategyWithFilePath creates a centralized sampling strategy with a fallback on
// local rules located at the given file path.
func NewCentralizedStrategyWithFilePath(fp string) (*CentralizedStrategy, error) {
fb, err := NewLocalizedStrategyFromFilePath(fp)
if err != nil {
return nil, err
}
return newCentralizedStrategy(fb)
}
func newCentralizedStrategy(fb *LocalizedStrategy) (*CentralizedStrategy, error) {
// Generate clientID
var r [12]byte
_, err := crypto.Read(r[:])
if err != nil {
return nil, err
}
id := fmt.Sprintf("%02x", r)
clock := &utils.DefaultClock{}
rand := &utils.DefaultRand{}
m := &CentralizedManifest{
Rules: []*CentralizedRule{},
Index: map[string]*CentralizedRule{},
clock: clock,
}
ss := &CentralizedStrategy{
manifest: m,
fallback: fb,
clientID: id,
clock: clock,
rand: rand,
}
return ss, nil
}
// ShouldTrace determines whether a request should be sampled. It matches the given parameters against
// a list of known rules and uses the matched rule's values to make a decision.
func (ss *CentralizedStrategy) ShouldTrace(request *Request) *Decision {
ss.mu.Lock()
if !ss.pollerStart {
ss.start()
}
ss.mu.Unlock()
if request.ServiceType == "" {
request.ServiceType = plugins.InstancePluginMetadata.Origin
}
logger.Debugf(
"Determining ShouldTrace decision for:\n\thost: %s\n\tpath: %s\n\tmethod: %s\n\tservicename: %s\n\tservicetype: %s",
request.Host,
request.URL,
request.Method,
request.ServiceName,
request.ServiceType,
)
// Use fallback if manifest is expired
if ss.manifest.expired() {
logger.Debug("Centralized sampling data expired. Using fallback sampling strategy")
return ss.fallback.ShouldTrace(request)
}
ss.manifest.mu.RLock()
defer ss.manifest.mu.RUnlock()
// Match against known rules
for _, r := range ss.manifest.Rules {
r.mu.RLock()
applicable := r.AppliesTo(request)
r.mu.RUnlock()
if !applicable {
continue
}
logger.Debugf("Applicable rule: %s", r.ruleName)
return r.Sample()
}
// Match against default rule
if r := ss.manifest.Default; r != nil {
logger.Debugf("Applicable rule: %s", r.ruleName)
return r.Sample()
}
// Use fallback if default rule is unavailable
logger.Debug("Centralized default sampling rule unavailable. Using fallback sampling strategy")
return ss.fallback.ShouldTrace(request)
}
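// exampleShouldTrace is an illustrative sketch, not part of the original file: it shows
// the shape of a sampling query. The field values are placeholders; the returned
// *Decision carries the sample/no-sample outcome the SDK uses when starting a segment.
func exampleShouldTrace(ss *CentralizedStrategy) *Decision {
	return ss.ShouldTrace(&Request{
		Host:   "example.com",
		URL:    "/ping",
		Method: "GET",
	})
}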
// start initiates rule and target pollers.
func (ss *CentralizedStrategy) start() {
if !ss.pollerStart {
var er error
ss.proxy, er = newProxy(ss.daemonEndpoints)
if er != nil {
panic(er)
}
ss.startRulePoller()
ss.startTargetPoller()
}
ss.pollerStart = true
}
// startRulePoller starts rule poller.
func (ss *CentralizedStrategy) startRulePoller() {
// Initial refresh
go func() {
if err := ss.refreshManifest(); err != nil {
logger.Debugf("Error occurred during initial refresh of sampling rules. %v", err)
} else {
logger.Info("Successfully fetched sampling rules")
}
}()
// Periodic manifest refresh
go func() {
// Period = 300s, Jitter = 5s
t := utils.NewTimer(300*time.Second, 5*time.Second)
for range t.C() {
t.Reset()
if err := ss.refreshManifest(); err != nil {
logger.Debugf("Error occurred while refreshing sampling rules. %v", err)
} else {
logger.Debug("Successfully fetched sampling rules")
}
}
}()
}
// startTargetPoller starts target poller.
func (ss *CentralizedStrategy) startTargetPoller() {
// Periodic quota refresh
go func() {
// Period = 10.1s, Jitter = 100ms
t := utils.NewTimer(10*time.Second+100*time.Millisecond, 100*time.Millisecond)
for range t.C() {
t.Reset()
if err := ss.refreshTargets(); err != nil {
logger.Debugf("Error occurred while refreshing targets for sampling rules. %v", err)
}
}
}()
}
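// Both pollers follow the same jittered-timer pattern built on utils.NewTimer. A rough
// standard-library-only equivalent is sketched below; the exact jitter semantics of
// utils.NewTimer are not reproduced here and the values are illustrative.
//
//	period, jitter := 10*time.Second, 100*time.Millisecond
//	for {
//		sleep := period - jitter + time.Duration(rand.Int63n(int64(2*jitter)))
//		time.Sleep(sleep)
//		if err := ss.refreshTargets(); err != nil {
//			logger.Debugf("Error occurred while refreshing targets for sampling rules. %v", err)
//		}
//	}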
// refreshManifest refreshes the manifest by calling the XRay service proxy for sampling rules
func (ss *CentralizedStrategy) refreshManifest() (err error) {
// Explicitly recover from panics since this is the entry point for a long-running goroutine
// and we cannot allow a panic to propagate to the application code.
defer func() {
if r := recover(); r != nil {
// Re-sort to bring the rules array into a consistent state.
ss.manifest.sort()
err = fmt.Errorf("%v", r)
}
}()
// Compute 'now' before calling GetSamplingRules to avoid marking manifest as
// fresher than it actually is.
now := ss.clock.Now().Unix()
// Get sampling rules from proxy
records, err := ss.proxy.GetSamplingRules()
if err != nil {
return
}
// Set of rules to exclude from pruning
actives := map[*CentralizedRule]bool{}
// Create missing rules. Update existing ones.
failed := false
for _, record := range records {
// Extract rule from record
svcRule := record.SamplingRule
if svcRule == nil {
logger.Debug("Sampling rule missing from sampling rule record.")
failed = true
continue
}
if svcRule.RuleName == nil {
logger.Debug("Sampling rule without rule name is not supported")
failed = true
continue
}
// Only sampling rule with version 1 is valid
if svcRule.Version == nil {
logger.Debug("Sampling rule without version number is not supported: ", *svcRule.RuleName)
failed = true
continue
}
version := *svcRule.Version
if version != int64(1) {
logger.Debug("Sampling rule without version 1 is not supported: ", *svcRule.RuleName)
failed = true
continue
}
if len(svcRule.Attributes) != 0 {
logger.Debug("Sampling rule with non nil Attributes is not applicable: ", *svcRule.RuleName)
continue
}
if svcRule.ResourceARN == nil {
logger.Debug("Sampling rule without ResourceARN is not applicable: ", *svcRule.RuleName)
continue
}
resourceARN := *svcRule.ResourceARN
if resourceARN != "*" {
logger.Debug("Sampling rule with ResourceARN not equal to * is not applicable: ", *svcRule.RuleName)
continue
}
// Create/update rule
r, putErr := ss.manifest.putRule(svcRule)
if putErr != nil {
failed = true
logger.Debugf("Error occurred creating/updating rule. %v", putErr)
} else if r != nil {
actives[r] = true
}
}
// Set err if updates failed
if failed {
err = errors.New("error occurred creating/updating rules")
}
// Prune inactive rules
ss.manifest.prune(actives)
// Re-sort to fix matching priorities
ss.manifest.sort()
// Update refreshedAt timestamp
ss.manifest.mu.Lock()
ss.manifest.refreshedAt = now
ss.manifest.mu.Unlock()
return
}
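// For reference, refreshManifest only accepts rules that carry a RuleName, Version == 1,
// an empty Attributes map, and ResourceARN == "*". A record passing those checks looks
// roughly like the sketch below (values are illustrative, mirroring the unit tests).
//
//	name, version, arn, svcType := "r1", int64(1), "*", ""
//	host, method, path := "www.foo.com", "POST", "/resource/bar"
//	priority, size, rate := int64(4), int64(50), 0.05
//	record := &xraySvc.SamplingRuleRecord{
//		SamplingRule: &xraySvc.SamplingRule{
//			RuleName: &name, Version: &version, ResourceARN: &arn, ServiceType: &svcType,
//			Host: &host, ServiceName: &host, HTTPMethod: &method, URLPath: &path,
//			Priority: &priority, ReservoirSize: &size, FixedRate: &rate,
//		},
//	}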
// refreshTargets refreshes targets for sampling rules. It calls the XRay service proxy with sampling
// statistics for the previous interval and receives targets for the next interval.
func (ss *CentralizedStrategy) refreshTargets() (err error) {
// Explicitly recover from panics since this is the entry point for a long-running goroutine
// and we cannot allow a panic to propagate to customer code.
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("%v", r)
}
}()
// Flag indicating batch failure
failed := false
// Flag indicating whether or not manifest should be refreshed
refresh := false
// Generate sampling statistics
statistics := ss.snapshots()
// Do not refresh targets if no statistics to report
if len(statistics) == 0 {
logger.Debugf("No statistics to report. Not refreshing sampling targets.")
return nil
}
// Get sampling targets
output, err := ss.proxy.GetSamplingTargets(statistics)
if err != nil {
return
}
// Update sampling targets
for _, t := range output.SamplingTargetDocuments {
if err = ss.updateTarget(t); err != nil {
failed = true
logger.Debugf("Error occurred updating target for rule. %v", err)
}
}
// Consume unprocessed statistics messages
for _, s := range output.UnprocessedStatistics {
logger.Debugf(
"Error occurred updating sampling target for rule: %s, code: %s, message: %s",
s.RuleName,
s.ErrorCode,
s.Message,
)
// Do not set any flags if error is unknown
if s.ErrorCode == nil || s.RuleName == nil {
continue
}
// Set batch failure if any sampling statistics return 5xx
if strings.HasPrefix(*s.ErrorCode, "5") {
failed = true
}
// Set refresh flag if any sampling statistics return 4xx
if strings.HasPrefix(*s.ErrorCode, "4") {
refresh = true
}
}
// Set err if updates failed
if failed {
err = errors.New("error occurred updating sampling targets")
} else {
logger.Debug("Successfully refreshed sampling targets")
}
// Set refresh flag if the remote rule-modification timestamp is at or after our last refresh.
if remote := output.LastRuleModification; remote != nil {
ss.manifest.mu.RLock()
local := ss.manifest.refreshedAt
ss.manifest.mu.RUnlock()
if remote.Unix() >= local {
refresh = true
}
}
// Perform out-of-band async manifest refresh if flag is set
if refresh {
logger.Infof("Refreshing sampling rules out-of-band.")
go func() {
if err := ss.refreshManifest(); err != nil {
logger.Debugf("Error occurred refreshing sampling rules out-of-band. %v", err)
}
}()
}
return
}
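// The unprocessed-statistics loop above classifies service error codes by their first
// digit: a "5xx" code marks the batch as failed, while a "4xx" code schedules an
// out-of-band manifest refresh. A minimal standalone check following the same
// convention (the code value is illustrative):
//
//	code := "400"
//	failed := strings.HasPrefix(code, "5")  // report batch failure
//	refresh := strings.HasPrefix(code, "4") // re-fetch the rule manifest
//	_, _ = failed, refresh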
// snapshots takes a snapshot of sampling statistics from all rules, resetting
// statistics counters in the process.
func (ss *CentralizedStrategy) snapshots() []*xraySvc.SamplingStatisticsDocument {
now := ss.clock.Now().Unix()
ss.manifest.mu.RLock()
defer ss.manifest.mu.RUnlock()
statistics := make([]*xraySvc.SamplingStatisticsDocument, 0, len(ss.manifest.Rules)+1)
// Generate sampling statistics for user-defined rules
for _, r := range ss.manifest.Rules {
if !r.stale(now) {
continue
}
s := r.snapshot()
s.ClientID = &ss.clientID
statistics = append(statistics, s)
}
// Generate sampling statistics for default rule
if r := ss.manifest.Default; r != nil && r.stale(now) {
s := r.snapshot()
s.ClientID = &ss.clientID
statistics = append(statistics, s)
}
return statistics
}
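// Each entry returned by snapshots() is an xraySvc.SamplingStatisticsDocument carrying
// the rule name, this strategy's clientID, the reset counters, and a timestamp, roughly:
//
//	&xraySvc.SamplingStatisticsDocument{
//		ClientID:     &ss.clientID,
//		RuleName:     &name,     // e.g. "r1"
//		RequestCount: &requests, // counters reset by the rule's snapshot()
//		SampledCount: &sampled,
//		BorrowCount:  &borrows,
//		Timestamp:    &now,
//	}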
// updateTarget updates sampling targets for the rule specified in the target struct.
func (ss *CentralizedStrategy) updateTarget(t *xraySvc.SamplingTargetDocument) (err error) {
// Preemptively dereference xraySvc.SamplingTargetDocument fields and return early on nil values.
// A panic in the middle of an update may leave the rule in an inconsistent state.
if t.RuleName == nil {
return errors.New("invalid sampling target. Missing rule name")
}
if t.FixedRate == nil {
return fmt.Errorf("invalid sampling target for rule %s. Missing fixed rate", *t.RuleName)
}
// Rule for given target
ss.manifest.mu.RLock()
r, ok := ss.manifest.Index[*t.RuleName]
ss.manifest.mu.RUnlock()
if !ok {
return fmt.Errorf("rule %s not found", *t.RuleName)
}
r.mu.Lock()
defer r.mu.Unlock()
r.reservoir.refreshedAt = ss.clock.Now().Unix()
// Update non-optional attributes from response
r.Rate = *t.FixedRate
// Update optional attributes from response
if t.ReservoirQuota != nil {
r.reservoir.quota = *t.ReservoirQuota
}
if t.ReservoirQuotaTTL != nil {
r.reservoir.expiresAt = t.ReservoirQuotaTTL.Unix()
}
if t.Interval != nil {
r.reservoir.interval = *t.Interval
}
return nil
}
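// A well-formed target for updateTarget must carry at least RuleName and FixedRate;
// ReservoirQuota, ReservoirQuotaTTL, and Interval are optional. The values below are
// illustrative and mirror the package's unit tests.
//
//	rate, quota := 0.05, int64(10)
//	ttl, name := time.Unix(1500000060, 0), "r1"
//	target := &xraySvc.SamplingTargetDocument{
//		RuleName:          &name,
//		FixedRate:         &rate,
//		ReservoirQuota:    &quota,
//		ReservoirQuotaTTL: &ttl,
//	}
//	_ = ss.updateTarget(target)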
// LoadDaemonEndpoints configures proxy with the provided endpoint.
func (ss *CentralizedStrategy) LoadDaemonEndpoints(endpoints *daemoncfg.DaemonEndpoints) {
ss.daemonEndpoints = endpoints
}
| 522 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package sampling
import (
"fmt"
"sort"
"strings"
"sync"
xraySvc "github.com/aws/aws-sdk-go/service/xray"
"github.com/aws/aws-xray-sdk-go/utils"
)
const defaultRule = "Default"
const defaultInterval = int64(10)
const manifestTTL = 3600 // Seconds
// CentralizedManifest represents a full sampling ruleset, with a list of
// custom rules and default values for incoming requests that do
// not match any of the provided rules.
type CentralizedManifest struct {
Default *CentralizedRule
Rules []*CentralizedRule
Index map[string]*CentralizedRule
refreshedAt int64
clock utils.Clock
mu sync.RWMutex
}
// putRule updates the named rule if it already exists or creates it if it does not.
// May break ordering of the sorted rules array if it creates a new rule.
func (m *CentralizedManifest) putRule(svcRule *xraySvc.SamplingRule) (r *CentralizedRule, err error) {
defer func() {
if x := recover(); x != nil {
err = fmt.Errorf("%v", x)
}
}()
name := *svcRule.RuleName
// Default rule
if name == defaultRule {
m.mu.RLock()
r = m.Default
m.mu.RUnlock()
// Update rule if already exists
if r != nil {
m.updateDefaultRule(svcRule)
return
}
// Create Default rule
r = m.createDefaultRule(svcRule)
return
}
// User-defined rule
m.mu.RLock()
r, ok := m.Index[name]
m.mu.RUnlock()
// Create rule if it does not exist
if !ok {
r = m.createUserRule(svcRule)
return
}
// Update existing rule
m.updateUserRule(r, svcRule)
return
}
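// putRule treats the reserved rule name "Default" separately from user-defined rules;
// any other name is created in (or updated within) the sorted Rules array and the Index
// map. A typical call site, as in refreshManifest, looks like:
//
//	r, putErr := m.putRule(svcRule) // svcRule is an *xraySvc.SamplingRule
//	if putErr == nil && r != nil {
//		actives[r] = true // keep this rule when pruning stale entries
//	}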
// createUserRule creates a user-defined CentralizedRule, appends it to the sorted array
// (which may break the array's ordering), adds it to the index, and returns the newly
// created rule.
// Panics if svcRule contains nil pointers.
func (m *CentralizedManifest) createUserRule(svcRule *xraySvc.SamplingRule) *CentralizedRule {
// Create CentralizedRule from xraySvc.SamplingRule
clock := &utils.DefaultClock{}
rand := &utils.DefaultRand{}
p := &Properties{
ServiceName: *svcRule.ServiceName,
HTTPMethod: *svcRule.HTTPMethod,
URLPath: *svcRule.URLPath,
FixedTarget: *svcRule.ReservoirSize,
Rate: *svcRule.FixedRate,
Host: *svcRule.Host,
}
r := &reservoir{
capacity: *svcRule.ReservoirSize,
}
cr := &CentralizedReservoir{
reservoir: r,
interval: defaultInterval,
}
csr := &CentralizedRule{
ruleName: *svcRule.RuleName,
priority: *svcRule.Priority,
reservoir: cr,
Properties: p,
serviceType: *svcRule.ServiceType,
resourceARN: *svcRule.ResourceARN,
attributes: svcRule.Attributes,
clock: clock,
rand: rand,
}
m.mu.Lock()
defer m.mu.Unlock()
// Return early if rule already exists
if r, ok := m.Index[*svcRule.RuleName]; ok {
return r
}
// Update sorted array
m.Rules = append(m.Rules, csr)
// Update index
m.Index[*svcRule.RuleName] = csr
return csr
}
// updateUserRule updates the properties of the user-defined CentralizedRule using the given
// xraySvc.SamplingRule.
// Panics if svcRule contains nil pointers.
func (m *CentralizedManifest) updateUserRule(r *CentralizedRule, svcRule *xraySvc.SamplingRule) {
// Preemptively dereference xraySvc.SamplingRule fields and panic early on nil pointers.
// A panic in the middle of an update may leave the rule in an inconsistent state.
pr := &Properties{
ServiceName: *svcRule.ServiceName,
HTTPMethod: *svcRule.HTTPMethod,
URLPath: *svcRule.URLPath,
FixedTarget: *svcRule.ReservoirSize,
Rate: *svcRule.FixedRate,
Host: *svcRule.Host,
}
p, c := *svcRule.Priority, *svcRule.ReservoirSize
r.mu.Lock()
defer r.mu.Unlock()
r.Properties = pr
r.priority = p
r.reservoir.capacity = c
r.serviceType = *svcRule.ServiceType
r.resourceARN = *svcRule.ResourceARN
r.attributes = svcRule.Attributes
}
// createDefaultRule creates a default CentralizedRule and adds it to the manifest.
// Panics if svcRule contains nil values for FixedRate and ReservoirSize.
func (m *CentralizedManifest) createDefaultRule(svcRule *xraySvc.SamplingRule) *CentralizedRule {
// Create CentralizedRule from xraySvc.SamplingRule
clock := &utils.DefaultClock{}
rand := &utils.DefaultRand{}
p := &Properties{
FixedTarget: *svcRule.ReservoirSize,
Rate: *svcRule.FixedRate,
}
r := &reservoir{
capacity: *svcRule.ReservoirSize,
}
cr := &CentralizedReservoir{
reservoir: r,
interval: defaultInterval,
}
csr := &CentralizedRule{
ruleName: *svcRule.RuleName,
reservoir: cr,
Properties: p,
clock: clock,
rand: rand,
}
m.mu.Lock()
defer m.mu.Unlock()
// Return early if rule already exists
if d := m.Default; d != nil {
return d
}
// Update manifest if rule does not exist
m.Default = csr
// Update index
m.Index[*svcRule.RuleName] = csr
return csr
}
// updateDefaultRule updates the properties of the default CentralizedRule using the given
// xraySvc.SamplingRule.
// Panics if svcRule contains nil values for FixedRate and ReservoirSize.
func (m *CentralizedManifest) updateDefaultRule(svcRule *xraySvc.SamplingRule) {
r := m.Default
// Preemptively dereference xraySvc.SamplingRule fields and panic early on nil pointers.
// A panic in the middle of an update may leave the rule in an inconsistent state.
p := &Properties{
FixedTarget: *svcRule.ReservoirSize,
Rate: *svcRule.FixedRate,
}
c := *svcRule.ReservoirSize
r.mu.Lock()
defer r.mu.Unlock()
r.Properties = p
r.reservoir.capacity = c
}
// prune removes all rules in the manifest not present in the given list of active rules.
// Preserves ordering of sorted array.
func (m *CentralizedManifest) prune(actives map[*CentralizedRule]bool) {
m.mu.Lock()
defer m.mu.Unlock()
// Iterate in reverse order to avoid adjusting index for each deleted rule
for i := len(m.Rules) - 1; i >= 0; i-- {
r := m.Rules[i]
if _, ok := actives[r]; !ok {
m.deleteRule(i)
}
}
}
// deleteRule deletes the rule from the array, and the index.
// Assumes write lock is already held.
// Preserves ordering of sorted array.
func (m *CentralizedManifest) deleteRule(idx int) {
// Remove from index
delete(m.Index, m.Rules[idx].ruleName)
// Delete by reslicing without index
a := append(m.Rules[:idx], m.Rules[idx+1:]...)
// Set pointer to nil to free capacity from underlying array
m.Rules[len(m.Rules)-1] = nil
// Assign resliced rules
m.Rules = a
}
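// deleteRule relies on the standard order-preserving slice deletion idiom. In isolation,
// for any slice s and index idx (illustrative names):
//
//	s = append(s[:idx], s[idx+1:]...)
//	// nil out the now-duplicated tail slot before reassigning, as done above with
//	// m.Rules[len(m.Rules)-1] = nil, so the removed pointer can be garbage collected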
// sort sorts the rule array first by priority and then by rule name.
func (m *CentralizedManifest) sort() {
// Comparison function
less := func(i, j int) bool {
if m.Rules[i].priority == m.Rules[j].priority {
return strings.Compare(m.Rules[i].ruleName, m.Rules[j].ruleName) < 0
}
return m.Rules[i].priority < m.Rules[j].priority
}
m.mu.Lock()
defer m.mu.Unlock()
sort.Slice(m.Rules, less)
}
// expired returns true if the manifest has not been successfully refreshed in
// 'manifestTTL' seconds.
func (m *CentralizedManifest) expired() bool {
m.mu.RLock()
defer m.mu.RUnlock()
return m.refreshedAt < m.clock.Now().Unix()-manifestTTL
}
| 295 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package sampling
import (
"testing"
"github.com/stretchr/testify/assert"
xraySvc "github.com/aws/aws-sdk-go/service/xray"
"github.com/aws/aws-xray-sdk-go/utils"
)
// Assert that putRule() creates a new user-defined rule and adds to manifest
func TestCreateUserRule(t *testing.T) {
resARN := "*"
r1 := &CentralizedRule{
ruleName: "r1",
priority: 5,
}
r3 := &CentralizedRule{
ruleName: "r3",
priority: 7,
}
rules := []*CentralizedRule{r1, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r3": r3,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
// New xraySvc.CentralizedSamplingRule. Input to putRule().
serviceName := "www.foo.com"
httpMethod := "POST"
urlPath := "/bar/*"
reservoirSize := int64(10)
fixedRate := float64(0.05)
ruleName := "r2"
host := "local"
priority := int64(6)
serviceTye := "*"
new := &xraySvc.SamplingRule{
ServiceName: &serviceName,
HTTPMethod: &httpMethod,
URLPath: &urlPath,
ReservoirSize: &reservoirSize,
FixedRate: &fixedRate,
RuleName: &ruleName,
Priority: &priority,
Host: &host,
ServiceType: &serviceTye,
ResourceARN: &resARN,
}
// Expected centralized sampling rule
clock := &utils.DefaultClock{}
rand := &utils.DefaultRand{}
p := &Properties{
ServiceName: serviceName,
HTTPMethod: httpMethod,
URLPath: urlPath,
FixedTarget: reservoirSize,
Rate: fixedRate,
Host: host,
}
cr := &CentralizedReservoir{
reservoir: &reservoir{
capacity: 10,
},
interval: 10,
}
exp := &CentralizedRule{
reservoir: cr,
ruleName: ruleName,
priority: priority,
Properties: p,
clock: clock,
rand: rand,
serviceType: serviceTye,
resourceARN: resARN,
}
// Add to manifest, index, and sort
r2, err := m.putRule(new)
assert.Nil(t, err)
assert.Equal(t, exp, r2)
// Assert new rule is present in index
r2, ok := m.Index["r2"]
assert.True(t, ok)
assert.Equal(t, exp, r2)
// Assert new rule present at end of array. putRule() does not preserve order.
r2 = m.Rules[2]
assert.Equal(t, exp, r2)
}
// Assert that putRule() creates a new default rule and adds to manifest
func TestCreateDefaultRule(t *testing.T) {
m := &CentralizedManifest{
Index: map[string]*CentralizedRule{},
}
// New xraySvc.CentralizedSamplingRule. Input to putRule().
reservoirSize := int64(10)
fixedRate := float64(0.05)
ruleName := "Default"
new := &xraySvc.SamplingRule{
ReservoirSize: &reservoirSize,
FixedRate: &fixedRate,
RuleName: &ruleName,
}
// Expected centralized sampling rule
clock := &utils.DefaultClock{}
rand := &utils.DefaultRand{}
p := &Properties{
FixedTarget: reservoirSize,
Rate: fixedRate,
}
cr := &CentralizedReservoir{
reservoir: &reservoir{
capacity: reservoirSize,
},
interval: 10,
}
exp := &CentralizedRule{
reservoir: cr,
ruleName: ruleName,
Properties: p,
clock: clock,
rand: rand,
}
// Add to manifest
r, err := m.putRule(new)
assert.Nil(t, err)
assert.Equal(t, exp, r)
assert.Equal(t, exp, m.Default)
}
// Assert that putRule() updates the existing default rule in the manifest
func TestUpdateDefaultRule(t *testing.T) {
clock := &utils.DefaultClock{}
rand := &utils.DefaultRand{}
// Original default sampling rule
r := &CentralizedRule{
ruleName: "Default",
Properties: &Properties{
FixedTarget: 10,
Rate: 0.05,
},
reservoir: &CentralizedReservoir{
reservoir: &reservoir{
capacity: 10,
},
},
clock: clock,
rand: rand,
}
m := &CentralizedManifest{
Default: r,
}
// Updated xraySvc.CentralizedSamplingRule. Input to putRule().
reservoirSize := int64(20)
fixedRate := float64(0.06)
ruleName := "Default"
updated := &xraySvc.SamplingRule{
ReservoirSize: &reservoirSize,
FixedRate: &fixedRate,
RuleName: &ruleName,
}
// Expected centralized sampling rule
p := &Properties{
FixedTarget: reservoirSize,
Rate: fixedRate,
}
cr := &CentralizedReservoir{
reservoir: &reservoir{
capacity: reservoirSize,
},
}
exp := &CentralizedRule{
reservoir: cr,
ruleName: ruleName,
Properties: p,
clock: clock,
rand: rand,
}
// Update default rule in manifest
r, err := m.putRule(updated)
assert.Nil(t, err)
assert.Equal(t, exp, r)
assert.Equal(t, exp, m.Default)
}
// Assert that creating a user-defined rule which already exists is a no-op
func TestCreateUserRuleNoOp(t *testing.T) {
resARN := "*"
serviceTye := ""
attributes := make(map[string]*string)
r1 := &CentralizedRule{
ruleName: "r1",
priority: 5,
}
r3 := &CentralizedRule{
ruleName: "r3",
priority: 7,
reservoir: &CentralizedReservoir{
reservoir: &reservoir{},
},
}
rules := []*CentralizedRule{r1, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r3": r3,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
// Duplicate xraySvc.CentralizedSamplingRule. 'r3' already exists. Input to updateRule().
serviceName := "www.foo.com"
httpMethod := "POST"
urlPath := "/bar/*"
reservoirSize := int64(10)
fixedRate := float64(0.05)
ruleName := "r3"
priority := int64(6)
host := "h"
new := &xraySvc.SamplingRule{
ServiceName: &serviceName,
HTTPMethod: &httpMethod,
URLPath: &urlPath,
ReservoirSize: &reservoirSize,
FixedRate: &fixedRate,
RuleName: &ruleName,
Priority: &priority,
Host: &host,
ResourceARN: &resARN,
ServiceType: &serviceTye,
Attributes: attributes,
}
// Assert manifest has not changed
r, err := m.putRule(new)
assert.Nil(t, err)
assert.Equal(t, r3, r)
assert.Equal(t, 2, len(m.Rules))
assert.Equal(t, 2, len(m.Index))
assert.Equal(t, r1, m.Rules[0])
assert.Equal(t, r3, m.Rules[1])
}
// Assert that putRule() updates the user-defined rule in the manifest
func TestUpdateUserRule(t *testing.T) {
resARN := "*"
serviceTye := ""
attributes := make(map[string]*string)
// Original rule
r1 := &CentralizedRule{
ruleName: "r1",
priority: 5,
Properties: &Properties{
ServiceName: "*.foo.com",
HTTPMethod: "GET",
URLPath: "/resource/*",
FixedTarget: 15,
Rate: 0.04,
},
reservoir: &CentralizedReservoir{
reservoir: &reservoir{
capacity: 5,
},
},
resourceARN: resARN,
serviceType: serviceTye,
attributes: attributes,
}
rules := []*CentralizedRule{r1}
index := map[string]*CentralizedRule{
"r1": r1,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
// Updated xraySvc.CentralizedSamplingRule. Input to updateRule().
serviceName := "www.foo.com"
httpMethod := "POST"
urlPath := "/bar/*"
reservoirSize := int64(10)
fixedRate := float64(0.05)
ruleName := "r1"
priority := int64(6)
host := "h"
updated := &xraySvc.SamplingRule{
ServiceName: &serviceName,
HTTPMethod: &httpMethod,
URLPath: &urlPath,
ReservoirSize: &reservoirSize,
FixedRate: &fixedRate,
RuleName: &ruleName,
Priority: &priority,
Host: &host,
ResourceARN: &resARN,
ServiceType: &serviceTye,
Attributes: attributes,
}
// Expected updated centralized sampling rule
p := &Properties{
ServiceName: serviceName,
HTTPMethod: httpMethod,
URLPath: urlPath,
FixedTarget: reservoirSize,
Rate: fixedRate,
Host: host,
}
cr := &CentralizedReservoir{
reservoir: &reservoir{
capacity: 10,
},
}
exp := &CentralizedRule{
reservoir: cr,
ruleName: ruleName,
priority: priority,
Properties: p,
resourceARN: resARN,
serviceType: serviceTye,
attributes: attributes,
}
// Assert that rule has been updated
r, err := m.putRule(updated)
assert.Nil(t, err)
assert.Equal(t, exp, r)
assert.Equal(t, exp, m.Index["r1"])
assert.Equal(t, exp, m.Rules[0])
assert.Equal(t, 1, len(m.Rules))
assert.Equal(t, 1, len(m.Index))
}
// Assert that putRule() recovers from panic.
func TestPutRuleRecovery(t *testing.T) {
resARN := "*"
serviceTye := ""
attributes := make(map[string]*string)
rules := []*CentralizedRule{}
index := map[string]*CentralizedRule{}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
// Invalid xraySvc.SamplingRule with nil fields. Input to putRule().
serviceName := "www.foo.com"
httpMethod := "POST"
fixedRate := float64(0.05)
ruleName := "r2"
priority := int64(6)
new := &xraySvc.SamplingRule{
ServiceName: &serviceName,
HTTPMethod: &httpMethod,
FixedRate: &fixedRate,
RuleName: &ruleName,
Priority: &priority,
ResourceARN: &resARN,
ServiceType: &serviceTye,
Attributes: attributes,
}
// Attempt to add to manifest
r, err := m.putRule(new)
assert.NotNil(t, err)
assert.Nil(t, r)
assert.Nil(t, m.Default)
// Assert index is unchanged
assert.Equal(t, 0, len(m.Index))
// Assert sorted array is unchanged
assert.Equal(t, 0, len(m.Rules))
}
// Assert that deleting a rule from the end of the array removes the rule
// and preserves ordering of the sorted array
func TestDeleteLastRule(t *testing.T) {
r1 := &CentralizedRule{
ruleName: "r1",
priority: 5,
}
r2 := &CentralizedRule{
ruleName: "r2",
priority: 6,
}
r3 := &CentralizedRule{
ruleName: "r3",
priority: 7,
}
rules := []*CentralizedRule{r1, r2, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r2": r2,
"r3": r3,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
// Active rules to exclude from deletion
a := map[*CentralizedRule]bool{
r1: true,
r2: true,
}
// Delete r3
m.prune(a)
// Assert size of manifest
assert.Equal(t, 2, len(m.Rules))
assert.Equal(t, 2, len(m.Index))
// Assert index consistency
_, ok := m.Index["r3"]
assert.False(t, ok)
assert.Equal(t, r1, m.Index["r1"])
assert.Equal(t, r2, m.Index["r2"])
// Assert ordering of array
assert.Equal(t, r1, m.Rules[0])
assert.Equal(t, r2, m.Rules[1])
}
// Assert that deleting a rule from the middle of the array removes the rule
// and preserves ordering of the sorted array
func TestDeleteMiddleRule(t *testing.T) {
r1 := &CentralizedRule{
ruleName: "r1",
priority: 5,
}
r2 := &CentralizedRule{
ruleName: "r2",
priority: 6,
}
r3 := &CentralizedRule{
ruleName: "r3",
priority: 7,
}
rules := []*CentralizedRule{r1, r2, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r2": r2,
"r3": r3,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
// Active rules to exclude from deletion
a := map[*CentralizedRule]bool{
r1: true,
r3: true,
}
// Delete r2
m.prune(a)
// Assert size of manifest
assert.Equal(t, 2, len(m.Rules))
assert.Equal(t, 2, len(m.Index))
// Assert index consistency
_, ok := m.Index["r2"]
assert.False(t, ok)
assert.Equal(t, r1, m.Index["r1"])
assert.Equal(t, r3, m.Index["r3"])
// Assert ordering of array
assert.Equal(t, r1, m.Rules[0])
assert.Equal(t, r3, m.Rules[1])
}
// Assert that deleting a rule from the beginning of the array removes the rule
// and preserves ordering of the sorted array
func TestDeleteFirstRule(t *testing.T) {
r1 := &CentralizedRule{
ruleName: "r1",
priority: 5,
}
r2 := &CentralizedRule{
ruleName: "r2",
priority: 6,
}
r3 := &CentralizedRule{
ruleName: "r3",
priority: 7,
}
rules := []*CentralizedRule{r1, r2, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r2": r2,
"r3": r3,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
// Active rules to exclude from deletion
a := map[*CentralizedRule]bool{
r2: true,
r3: true,
}
// Delete r1
m.prune(a)
// Assert size of manifest
assert.Equal(t, 2, len(m.Rules))
assert.Equal(t, 2, len(m.Index))
// Assert index consistency
_, ok := m.Index["r1"]
assert.False(t, ok)
assert.Equal(t, r2, m.Index["r2"])
assert.Equal(t, r3, m.Index["r3"])
// Assert ordering of array
assert.Equal(t, r2, m.Rules[0])
assert.Equal(t, r3, m.Rules[1])
}
// Assert that deleting the only rule from the array removes the rule
func TestDeleteOnlyRule(t *testing.T) {
r1 := &CentralizedRule{
ruleName: "r1",
priority: 5,
}
rules := []*CentralizedRule{r1}
index := map[string]*CentralizedRule{
"r1": r1,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
// Active rules to exclude from deletion
a := map[*CentralizedRule]bool{}
// Delete r1
m.prune(a)
// Assert size of manifest
assert.Equal(t, 0, len(m.Rules))
assert.Equal(t, 0, len(m.Index))
// Assert index consistency
_, ok := m.Index["r1"]
assert.False(t, ok)
}
// Assert that deleting rules from an empty array does not panic
func TestDeleteEmptyRulesArray(t *testing.T) {
rules := []*CentralizedRule{}
index := map[string]*CentralizedRule{}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
// Active rules to exclude from deletion
a := map[*CentralizedRule]bool{}
// Delete from empty array
m.prune(a)
// Assert size of manifest
assert.Equal(t, 0, len(m.Rules))
assert.Equal(t, 0, len(m.Index))
}
// Assert that deleting all rules results in an empty array and does not panic
func TestDeleteAllRules(t *testing.T) {
r1 := &CentralizedRule{
ruleName: "r1",
priority: 5,
}
r2 := &CentralizedRule{
ruleName: "r2",
priority: 6,
}
r3 := &CentralizedRule{
ruleName: "r3",
priority: 7,
}
rules := []*CentralizedRule{r1, r2, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r2": r2,
"r3": r3,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
// Active rules to exclude from deletion
a := map[*CentralizedRule]bool{}
// Delete all rules
m.prune(a)
// Assert size of manifest
assert.Equal(t, 0, len(m.Rules))
assert.Equal(t, 0, len(m.Index))
}
// Assert that sorting an unsorted array results in a sorted array - check priority
func TestSort1(t *testing.T) {
r1 := &CentralizedRule{
ruleName: "r1",
priority: 5,
}
r2 := &CentralizedRule{
ruleName: "r2",
priority: 6,
}
r3 := &CentralizedRule{
ruleName: "r3",
priority: 7,
}
// Unsorted rules array
rules := []*CentralizedRule{r2, r1, r3}
m := &CentralizedManifest{
Rules: rules,
}
// Sort array
m.sort()
// Assert on order
assert.Equal(t, r1, m.Rules[0])
assert.Equal(t, r2, m.Rules[1])
assert.Equal(t, r3, m.Rules[2])
}
// Assert that sorting an unsorted array results in a sorted array - check priority and rule name
func TestSort2(t *testing.T) {
r1 := &CentralizedRule{
ruleName: "r1",
priority: 5,
}
r2 := &CentralizedRule{
ruleName: "r2",
priority: 5,
}
r3 := &CentralizedRule{
ruleName: "r3",
priority: 7,
}
// Unsorted rules array
rules := []*CentralizedRule{r2, r1, r3}
m := &CentralizedManifest{
Rules: rules,
}
// Sort array
m.sort() // r1 should precede r2
// Assert on order
assert.Equal(t, r1, m.Rules[0])
assert.Equal(t, r2, m.Rules[1])
assert.Equal(t, r3, m.Rules[2])
}
// Assert that an expired manifest is recognized as such
func TestExpired(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500003601,
}
m := &CentralizedManifest{
refreshedAt: 1500000000,
clock: clock,
}
assert.True(t, m.expired())
}
// Assert that a fresh manifest is recognized as such
func TestFresh(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500003600,
}
m := &CentralizedManifest{
refreshedAt: 1500000000,
clock: clock,
}
assert.False(t, m.expired())
}
// benchmarks
func BenchmarkCentralizedManifest_putRule(b *testing.B) {
r1 := &CentralizedRule{
ruleName: "r1",
priority: 5,
}
r3 := &CentralizedRule{
ruleName: "r3",
priority: 7,
}
rules := []*CentralizedRule{r1, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r3": r3,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
// New xraySvc.CentralizedSamplingRule. Input to putRule().
resARN := "*"
serviceName := "www.foo.com"
httpMethod := "POST"
urlPath := "/bar/*"
reservoirSize := int64(10)
fixedRate := float64(0.05)
ruleName := "r2"
host := "local"
priority := int64(6)
serviceTye := "*"
new := &xraySvc.SamplingRule{
ServiceName: &serviceName,
HTTPMethod: &httpMethod,
URLPath: &urlPath,
ReservoirSize: &reservoirSize,
FixedRate: &fixedRate,
RuleName: &ruleName,
Priority: &priority,
Host: &host,
ServiceType: &serviceTye,
ResourceARN: &resARN,
}
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, err := m.putRule(new)
if err != nil {
return
}
}
})
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
m.createUserRule(new)
}
})
}
| 843 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package sampling
import (
"errors"
"testing"
"time"
"github.com/aws/aws-xray-sdk-go/daemoncfg"
xraySvc "github.com/aws/aws-sdk-go/service/xray"
"github.com/aws/aws-xray-sdk-go/utils"
"github.com/stretchr/testify/assert"
)
// Mock implementation of xray service proxy. Used for unit testing.
type mockProxy struct {
samplingRules []*xraySvc.SamplingRuleRecord
samplingTargetOutput *xraySvc.GetSamplingTargetsOutput
}
func (p *mockProxy) GetSamplingRules() ([]*xraySvc.SamplingRuleRecord, error) {
if p.samplingRules == nil {
return nil, errors.New("Error encountered retrieving sampling rules")
}
return p.samplingRules, nil
}
func (p *mockProxy) GetSamplingTargets(s []*xraySvc.SamplingStatisticsDocument) (*xraySvc.GetSamplingTargetsOutput, error) {
if p.samplingTargetOutput == nil {
return nil, errors.New("Error encountered retrieving sampling targets")
}
targets := make([]*xraySvc.SamplingTargetDocument, 0, len(s))
for _, s := range s {
for _, t := range p.samplingTargetOutput.SamplingTargetDocuments {
if *t.RuleName == *s.RuleName {
targets = append(targets, t)
}
}
}
copy := *p.samplingTargetOutput
copy.SamplingTargetDocuments = targets
return &copy, nil
}
func getProperties(host string, method string, url string, serviceName string, rate float64, ft int) *Properties {
return &Properties{
Host: host,
HTTPMethod: method,
URLPath: url,
ServiceName: serviceName,
Rate: rate,
FixedTarget: int64(ft),
}
}
// Assert request matches against the correct sampling rule and gets sampled
func TestShouldTracePositive1(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500000000,
}
rand := &utils.MockRand{
F64: 0.06,
}
host1 := "www.foo.com"
method1 := "POST"
url1 := "/resource/bar"
serviceName1 := "localhost"
servType1 := "AWS::EC2::Instance"
sr := &Request{
Host: host1,
URL: url1,
Method: method1,
ServiceName: serviceName1,
ServiceType: servType1,
}
// Sampling rules with available quotas
csr1 := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
expiresAt: 1500000050,
reservoir: &reservoir{
capacity: 50,
used: 8,
currentEpoch: 1500000000,
},
},
Properties: getProperties(host1, method1, url1, serviceName1, 0, 0),
serviceType: servType1,
clock: clock,
rand: rand,
}
host2 := "www.bar.com"
method2 := "POST"
url2 := "/resource/foo"
serviceName2 := ""
csr2 := &CentralizedRule{
ruleName: "r2",
reservoir: &CentralizedReservoir{
quota: 10,
expiresAt: 1500000050,
reservoir: &reservoir{
capacity: 50,
used: 8,
currentEpoch: 1500000000,
},
},
Properties: getProperties(host2, method2, url2, serviceName2, 0, 0),
clock: clock,
rand: rand,
}
rules := []*CentralizedRule{csr2, csr1}
index := map[string]*CentralizedRule{
"r1": csr1,
"r2": csr2,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
clock: clock,
}
s := &CentralizedStrategy{
manifest: m,
clock: clock,
rand: rand,
}
// Make positive sampling decision against 'r1'
sd := s.ShouldTrace(sr)
assert.True(t, sd.Sample)
assert.Equal(t, "r1", *sd.Rule)
assert.Equal(t, int64(1), csr1.requests)
assert.Equal(t, int64(1), csr1.sampled)
assert.Equal(t, int64(9), csr1.reservoir.used)
}
// Assert request matches against the correct sampling rule and gets sampled
// ServiceType is left empty since it is neither configured nor passed in the request.
// r1 is matched because we do best effort matching
func TestShouldTracePositive2(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500000000,
}
rand := &utils.MockRand{
F64: 0.06,
}
host1 := "www.foo.com"
method1 := "POST"
url1 := "/resource/bar"
serviceName1 := "localhost"
servType1 := "AWS::EC2::Instance"
// serviceType missing
sr := &Request{
Host: host1,
URL: url1,
Method: method1,
ServiceName: serviceName1,
}
// Sampling rules with available quotas
csr1 := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
expiresAt: 1500000050,
reservoir: &reservoir{
capacity: 50,
used: 8,
currentEpoch: 1500000000,
},
},
Properties: getProperties(host1, method1, url1, serviceName1, 0, 0),
serviceType: servType1,
clock: clock,
rand: rand,
}
host2 := "www.bar.com"
method2 := "POST"
url2 := "/resource/foo"
serviceName2 := ""
csr2 := &CentralizedRule{
ruleName: "r2",
reservoir: &CentralizedReservoir{
quota: 10,
expiresAt: 1500000050,
reservoir: &reservoir{
capacity: 50,
used: 8,
currentEpoch: 1500000000,
},
},
Properties: getProperties(host2, method2, url2, serviceName2, 0, 0),
clock: clock,
rand: rand,
}
rules := []*CentralizedRule{csr2, csr1}
index := map[string]*CentralizedRule{
"r1": csr1,
"r2": csr2,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
clock: clock,
}
strategy, _ := NewLocalizedStrategy()
s := &CentralizedStrategy{
manifest: m,
clock: clock,
rand: rand,
fallback: strategy,
}
// Make positive sampling decision against 'r1'
sd := s.ShouldTrace(sr)
assert.True(t, sd.Sample)
assert.Equal(t, "r1", *sd.Rule)
assert.Equal(t, int64(1), csr1.requests)
assert.Equal(t, int64(1), csr1.sampled)
assert.Equal(t, int64(9), csr1.reservoir.used)
}
// Assert request matches against the default sampling rule and gets sampled
func TestShouldTraceDefaultPositive(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500000000,
}
rand := &utils.MockRand{
F64: 0.06,
}
// Sampling rule with available quota
csr := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
expiresAt: 1500000050,
reservoir: &reservoir{
capacity: 50,
used: 8,
currentEpoch: 1500000000,
},
},
Properties: getProperties("www.foo.com", "POST", "/resource/bar", "", 0, 0),
clock: clock,
rand: rand,
}
// Default sampling rule
def := &CentralizedRule{
ruleName: "Default",
reservoir: &CentralizedReservoir{
quota: 10,
expiresAt: 1500000050,
reservoir: &reservoir{
capacity: 50,
used: 8,
currentEpoch: 1500000000,
},
},
clock: clock,
rand: rand,
}
rules := []*CentralizedRule{csr}
index := map[string]*CentralizedRule{
"r1": csr,
}
m := &CentralizedManifest{
Default: def,
Rules: rules,
Index: index,
refreshedAt: 1500000000,
clock: clock,
}
s := &CentralizedStrategy{
manifest: m,
clock: clock,
rand: rand,
}
sr := &Request{
Host: "www.foo.bar.com",
URL: "/resource/bat",
Method: "GET",
}
// Make positive sampling decision against 'Default' rule
sd := s.ShouldTrace(sr)
// Assert 'Default' rule was used
assert.True(t, sd.Sample)
assert.Equal(t, "Default", *sd.Rule)
assert.Equal(t, int64(1), m.Default.requests)
assert.Equal(t, int64(1), m.Default.sampled)
assert.Equal(t, int64(9), m.Default.reservoir.used)
// Assert 'r1' was not used
assert.Equal(t, int64(0), csr.requests)
assert.Equal(t, int64(0), csr.sampled)
assert.Equal(t, int64(8), csr.reservoir.used)
}
// Assert fallback strategy was used for expired manifest
func TestShouldTraceExpiredManifest(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500003601,
}
rand := &utils.MockRand{
F64: 0.05,
}
// Sampling rule with available quota
csr := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
expiresAt: 1500000050,
reservoir: &reservoir{
capacity: 50,
used: 8,
currentEpoch: 1500000000,
},
},
Properties: getProperties("www.foo.com", "POST", "/resource/bar", "", 0, 0),
clock: clock,
rand: rand,
}
rules := []*CentralizedRule{csr}
index := map[string]*CentralizedRule{
"r1": csr,
}
centralManifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
clock: clock,
}
// Local Manifest with Default rule with available reservoir.
defaultRule := &Rule{
reservoir: &Reservoir{
clock: clock,
reservoir: &reservoir{
capacity: int64(10),
used: int64(4),
currentEpoch: int64(1500003601),
},
},
Properties: &Properties{
FixedTarget: int64(10),
Rate: float64(0.05),
},
}
localManifest := &RuleManifest{
Version: 1,
Default: defaultRule,
Rules: []*Rule{},
}
fb := &LocalizedStrategy{
manifest: localManifest,
}
s := &CentralizedStrategy{
manifest: centralManifest,
fallback: fb,
clock: clock,
rand: rand,
}
sr := &Request{
Host: "www.foo.bar.com",
URL: "/resource/bar",
Method: "POST",
}
// Fallback to local sampling strategy and make positive decision
sd := s.ShouldTrace(sr)
// Assert fallback 'Default' rule was sampled
assert.True(t, sd.Sample)
assert.Nil(t, sd.Rule)
// Assert 'r1' was not used
assert.Equal(t, int64(0), csr.requests)
assert.Equal(t, int64(0), csr.sampled)
assert.Equal(t, int64(8), csr.reservoir.used)
}
// Assert that snapshots returns an array of valid sampling statistics
func TestSnapshots(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500000000,
}
id := "c1"
time := clock.Now()
name1 := "r1"
requests1 := int64(1000)
sampled1 := int64(100)
borrows1 := int64(5)
r1 := &CentralizedReservoir{
interval: 10,
}
csr1 := &CentralizedRule{
ruleName: name1,
requests: requests1,
sampled: sampled1,
borrows: borrows1,
usedAt: 1500000000,
reservoir: r1,
clock: clock,
}
name2 := "r2"
requests2 := int64(500)
sampled2 := int64(10)
borrows2 := int64(0)
r2 := &CentralizedReservoir{
interval: 10,
}
csr2 := &CentralizedRule{
ruleName: name2,
requests: requests2,
sampled: sampled2,
borrows: borrows2,
usedAt: 1500000000,
reservoir: r2,
clock: clock,
}
rules := []*CentralizedRule{csr1, csr2}
m := &CentralizedManifest{
Rules: rules,
}
strategy := &CentralizedStrategy{
manifest: m,
clientID: id,
clock: clock,
}
// Expected SamplingStatistics structs
ss1 := xraySvc.SamplingStatisticsDocument{
ClientID: &id,
RequestCount: &requests1,
RuleName: &name1,
SampledCount: &sampled1,
BorrowCount: &borrows1,
Timestamp: &time,
}
ss2 := xraySvc.SamplingStatisticsDocument{
ClientID: &id,
RequestCount: &requests2,
RuleName: &name2,
SampledCount: &sampled2,
BorrowCount: &borrows2,
Timestamp: &time,
}
statistics := strategy.snapshots()
assert.Equal(t, ss1, *statistics[0])
assert.Equal(t, ss2, *statistics[1])
}
// Assert that fresh and inactive rules are not included in a snapshot
func TestMixedSnapshots(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500000000,
}
id := "c1"
time := clock.Now()
// Stale and active rule
name1 := "r1"
requests1 := int64(1000)
sampled1 := int64(100)
borrows1 := int64(5)
r1 := &CentralizedReservoir{
interval: 20,
refreshedAt: 1499999980,
}
csr1 := &CentralizedRule{
ruleName: name1,
requests: requests1,
sampled: sampled1,
borrows: borrows1,
usedAt: 1499999999,
reservoir: r1,
clock: clock,
}
// Stale and inactive rule
name2 := "r2"
requests2 := int64(0)
sampled2 := int64(0)
borrows2 := int64(0)
r2 := &CentralizedReservoir{
interval: 20,
refreshedAt: 1499999970,
}
csr2 := &CentralizedRule{
ruleName: name2,
requests: requests2,
sampled: sampled2,
borrows: borrows2,
usedAt: 1499999999,
reservoir: r2,
clock: clock,
}
// Fresh rule
name3 := "r3"
requests3 := int64(1000)
sampled3 := int64(100)
borrows3 := int64(5)
r3 := &CentralizedReservoir{
interval: 20,
refreshedAt: 1499999990,
}
csr3 := &CentralizedRule{
ruleName: name3,
requests: requests3,
sampled: sampled3,
borrows: borrows3,
usedAt: 1499999999,
reservoir: r3,
clock: clock,
}
rules := []*CentralizedRule{csr1, csr2, csr3}
m := &CentralizedManifest{
Rules: rules,
}
strategy := &CentralizedStrategy{
manifest: m,
clientID: id,
clock: clock,
}
// Expected SamplingStatistics structs
ss1 := xraySvc.SamplingStatisticsDocument{
ClientID: &id,
RequestCount: &requests1,
RuleName: &name1,
SampledCount: &sampled1,
BorrowCount: &borrows1,
Timestamp: &time,
}
statistics := strategy.snapshots()
assert.Equal(t, 1, len(statistics))
assert.Equal(t, ss1, *statistics[0])
}
// Assert that a valid sampling target updates its rule
func TestUpdateTarget(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500000000,
}
// Sampling target received from centralized sampling backend
rate := float64(0.05)
quota := int64(10)
ttl := time.Unix(1500000060, 0)
name := "r1"
st := &xraySvc.SamplingTargetDocument{
FixedRate: &rate,
ReservoirQuota: "a,
ReservoirQuotaTTL: &ttl,
RuleName: &name,
}
// Sampling rule about to be updated with new target
csr := &CentralizedRule{
ruleName: "r1",
Properties: &Properties{
Rate: 0.10,
},
reservoir: &CentralizedReservoir{
quota: 8,
refreshedAt: 1499999990,
expiresAt: 1500000010,
reservoir: &reservoir{
capacity: 50,
used: 7,
currentEpoch: 1500000000,
},
},
}
rules := []*CentralizedRule{csr}
index := map[string]*CentralizedRule{
"r1": csr,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
s := &CentralizedStrategy{
manifest: m,
clock: clock,
}
err := s.updateTarget(st)
assert.Nil(t, err)
// Updated sampling rule
exp := &CentralizedRule{
ruleName: "r1",
Properties: &Properties{
Rate: 0.05,
},
reservoir: &CentralizedReservoir{
quota: 10,
refreshedAt: 1500000000,
expiresAt: 1500000060,
reservoir: &reservoir{
capacity: 50,
used: 7,
currentEpoch: 1500000000,
},
},
}
act := s.manifest.Rules[0]
assert.Equal(t, exp, act)
}
// Assert that a missing sampling rule returns an error
func TestUpdateTargetMissingRule(t *testing.T) {
// Sampling target received from centralized sampling backend
rate := float64(0.05)
quota := int64(10)
ttl := time.Unix(1500000060, 0)
name := "r1"
st := &xraySvc.SamplingTargetDocument{
FixedRate: &rate,
ReservoirQuota: "a,
ReservoirQuotaTTL: &ttl,
RuleName: &name,
}
rules := []*CentralizedRule{}
index := map[string]*CentralizedRule{}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
s := &CentralizedStrategy{
manifest: m,
}
err := s.updateTarget(st)
assert.NotNil(t, err)
}
// Assert that an invalid sampling target returns an error and does not panic
func TestUpdateTargetPanicRecovery(t *testing.T) {
// Invalid sampling target missing FixedRate.
quota := int64(10)
ttl := time.Unix(1500000060, 0)
name := "r1"
st := &xraySvc.SamplingTargetDocument{
ReservoirQuota: "a,
ReservoirQuotaTTL: &ttl,
RuleName: &name,
}
// Sampling rule about to be updated with new target
csr := &CentralizedRule{
ruleName: "r1",
Properties: &Properties{
Rate: 0.10,
},
reservoir: &CentralizedReservoir{
quota: 8,
expiresAt: 1500000010,
reservoir: &reservoir{
capacity: 50,
used: 7,
currentEpoch: 1500000000,
},
},
}
rules := []*CentralizedRule{csr}
index := map[string]*CentralizedRule{
"r1": csr,
}
m := &CentralizedManifest{
Rules: rules,
Index: index,
}
s := &CentralizedStrategy{
manifest: m,
}
err := s.updateTarget(st)
assert.NotNil(t, err)
// Unchanged sampling rule
exp := &CentralizedRule{
ruleName: "r1",
Properties: &Properties{
Rate: 0.10,
},
reservoir: &CentralizedReservoir{
quota: 8,
expiresAt: 1500000010,
reservoir: &reservoir{
capacity: 50,
used: 7,
currentEpoch: 1500000000,
},
},
}
act := s.manifest.Rules[0]
assert.Equal(t, exp, act)
}
// Assert that manifest refresh updates the manifest and leaves it in a
// consistent state.
func TestRefreshManifestRuleAddition(t *testing.T) {
serviceTye := ""
resourceARN := "*"
// Rule 'r1'
r1 := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{},
priority: 4,
resourceARN: resourceARN,
}
// Rule 'r3'
r3 := &CentralizedRule{
ruleName: "r3",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{
Host: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 40,
Rate: 0.10,
ServiceName: "www.bar.com",
},
priority: 8,
resourceARN: resourceARN,
}
// Sorted array
rules := []*CentralizedRule{r1, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r3": r3,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
}
// Valid no-op update for rule 'r1'
name1 := "r1"
fixedRate1 := 0.05
httpMethod1 := "POST"
priority1 := int64(4)
reservoirSize1 := int64(50)
serviceName1 := "www.foo.com"
urlPath1 := "/resource/bar"
version1 := int64(1)
u1 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name1,
ServiceName: &serviceName1,
URLPath: &urlPath1,
HTTPMethod: &httpMethod1,
Priority: &priority1,
ReservoirSize: &reservoirSize1,
FixedRate: &fixedRate1,
Version: &version1,
Host: &serviceName1,
ServiceType: &serviceTye,
ResourceARN: &resourceARN,
},
}
// New valid rule 'r2'
name2 := "r2"
fixedRate2 := 0.04
httpMethod2 := "PUT"
priority2 := int64(5)
reservoirSize2 := int64(60)
serviceName2 := "www.fizz.com"
urlPath2 := "/resource/fizz"
version2 := int64(1)
u2 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name2,
ServiceName: &serviceName2,
URLPath: &urlPath2,
HTTPMethod: &httpMethod2,
Priority: &priority2,
ReservoirSize: &reservoirSize2,
FixedRate: &fixedRate2,
Version: &version2,
Host: &serviceName2,
ServiceType: &serviceTye,
ResourceARN: &resourceARN,
},
}
// Valid no-op update for rule 'r3'
name3 := "r3"
fixedRate3 := 0.10
httpMethod3 := "POST"
priority3 := int64(8)
reservoirSize3 := int64(40)
serviceName3 := "www.bar.com"
urlPath3 := "/resource/foo"
version3 := int64(1)
u3 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name3,
ServiceName: &serviceName3,
URLPath: &urlPath3,
HTTPMethod: &httpMethod3,
Priority: &priority3,
ReservoirSize: &reservoirSize3,
FixedRate: &fixedRate3,
Version: &version3,
Host: &serviceName3,
ServiceType: &serviceTye,
ResourceARN: &resourceARN,
},
}
// Mock proxy with updates u1, u2, and u3
proxy := &mockProxy{
samplingRules: []*xraySvc.SamplingRuleRecord{u1, u2, u3},
}
// Mock clock with time incremented to 60 seconds past current
// manifest refreshedAt timestamp.
clock := &utils.MockClock{
NowTime: 1500000060,
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clock: clock,
}
// Refresh manifest with updates from mock proxy
err := ss.refreshManifest()
assert.Nil(t, err)
// Expected 'r2'
r2 := &CentralizedRule{
ruleName: "r2",
reservoir: &CentralizedReservoir{
reservoir: &reservoir{
capacity: 60,
},
interval: 10,
},
Properties: &Properties{
Host: "www.fizz.com",
HTTPMethod: "PUT",
URLPath: "/resource/fizz",
FixedTarget: 60,
Rate: 0.04,
ServiceName: "www.fizz.com",
},
priority: 5,
clock: &utils.DefaultClock{},
rand: &utils.DefaultRand{},
resourceARN: resourceARN,
}
// Assert on addition of new rule
assert.Equal(t, r2, ss.manifest.Index["r2"])
assert.Equal(t, r2, ss.manifest.Rules[1])
// Assert on sorting order
assert.Equal(t, r1, ss.manifest.Rules[0])
assert.Equal(t, r2, ss.manifest.Rules[1])
assert.Equal(t, r3, ss.manifest.Rules[2])
// Assert on size of manifest
assert.Equal(t, 3, len(ss.manifest.Rules))
assert.Equal(t, 3, len(ss.manifest.Index))
// Assert on refreshedAt timestamp
assert.Equal(t, int64(1500000060), ss.manifest.refreshedAt)
}
func TestRefreshManifestRuleAdditionInvalidRule1(t *testing.T) { // ResourceARN has invalid value
serviceTye := ""
resourceARN := "XYZ" // invalid
// Rule 'r1'
r1 := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{},
priority: 4,
resourceARN: resourceARN,
}
// Sorted array
rules := []*CentralizedRule{r1}
index := map[string]*CentralizedRule{
"r1": r1,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
}
// Valid no-op update for rule 'r1'
name1 := "r1"
fixedRate1 := 0.05
httpMethod1 := "POST"
priority1 := int64(4)
reservoirSize1 := int64(50)
serviceName1 := "www.foo.com"
urlPath1 := "/resource/bar"
version1 := int64(1)
u1 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name1,
ServiceName: &serviceName1,
URLPath: &urlPath1,
HTTPMethod: &httpMethod1,
Priority: &priority1,
ReservoirSize: &reservoirSize1,
FixedRate: &fixedRate1,
Version: &version1,
Host: &serviceName1,
ServiceType: &serviceTye,
ResourceARN: &resourceARN,
},
}
// Mock proxy with updates u1
proxy := &mockProxy{
samplingRules: []*xraySvc.SamplingRuleRecord{u1},
}
// Mock clock with time incremented to 60 seconds past current
// manifest refreshedAt timestamp.
clock := &utils.MockClock{
NowTime: 1500000060,
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clock: clock,
}
// Refresh manifest with updates from mock proxy
err := ss.refreshManifest()
assert.Nil(t, err)
assert.Equal(t, 0, len(ss.manifest.Rules)) // Rule not added
}
func TestRefreshManifestRuleAdditionInvalidRule2(t *testing.T) { // non nil Attributes
serviceTye := ""
resourceARN := "*"
attributes := make(map[string]*string)
attributes["a"] = &resourceARN
// Rule 'r1'
r1 := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{},
priority: 4,
resourceARN: resourceARN,
}
// Sorted array
rules := []*CentralizedRule{r1}
index := map[string]*CentralizedRule{
"r1": r1,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
}
// Valid no-op update for rule 'r1'
name1 := "r1"
fixedRate1 := 0.05
httpMethod1 := "POST"
priority1 := int64(4)
reservoirSize1 := int64(50)
serviceName1 := "www.foo.com"
urlPath1 := "/resource/bar"
version1 := int64(1)
u1 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name1,
ServiceName: &serviceName1,
URLPath: &urlPath1,
HTTPMethod: &httpMethod1,
Priority: &priority1,
ReservoirSize: &reservoirSize1,
FixedRate: &fixedRate1,
Version: &version1,
Host: &serviceName1,
ServiceType: &serviceTye,
ResourceARN: &resourceARN,
Attributes: attributes, // invalid
},
}
// Mock proxy with updates u1
proxy := &mockProxy{
samplingRules: []*xraySvc.SamplingRuleRecord{u1},
}
// Mock clock with time incremented to 60 seconds past current
// manifest refreshedAt timestamp.
clock := &utils.MockClock{
NowTime: 1500000060,
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clock: clock,
}
err := ss.refreshManifest()
assert.Nil(t, err)
assert.Equal(t, 0, len(ss.manifest.Rules)) // rule not added
}
func TestRefreshManifestRuleAdditionInvalidRule3(t *testing.T) { // 1 valid and 1 invalid rule
serviceTye := ""
resourceARN := "*"
attributes := make(map[string]*string)
attributes["a"] = &resourceARN
// Rule 'r1'
r1 := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{},
priority: 4,
resourceARN: resourceARN,
}
r2 := &CentralizedRule{
ruleName: "r2",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{},
priority: 4,
resourceARN: resourceARN,
}
// Sorted array
rules := []*CentralizedRule{r1}
index := map[string]*CentralizedRule{
"r1": r1,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
}
// Valid no-op update for rule 'r1'
name1 := "r1"
fixedRate1 := 0.05
httpMethod1 := "POST"
priority1 := int64(4)
reservoirSize1 := int64(50)
serviceName1 := "www.foo.com"
urlPath1 := "/resource/bar"
version1 := int64(1)
u1 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name1,
ServiceName: &serviceName1,
URLPath: &urlPath1,
HTTPMethod: &httpMethod1,
Priority: &priority1,
ReservoirSize: &reservoirSize1,
FixedRate: &fixedRate1,
Version: &version1,
Host: &serviceName1,
ServiceType: &serviceTye,
ResourceARN: &resourceARN,
Attributes: attributes, // invalid
},
}
name2 := "r2"
u2 := &xraySvc.SamplingRuleRecord{ // valid rule
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name2,
ServiceName: &serviceName1,
URLPath: &urlPath1,
HTTPMethod: &httpMethod1,
Priority: &priority1,
ReservoirSize: &reservoirSize1,
FixedRate: &fixedRate1,
Version: &version1,
Host: &serviceName1,
ServiceType: &serviceTye,
ResourceARN: &resourceARN,
},
}
// Mock proxy with updates u1
proxy := &mockProxy{
samplingRules: []*xraySvc.SamplingRuleRecord{u1, u2},
}
// Mock clock with time incremented to 60 seconds past current
// manifest refreshedAt timestamp.
clock := &utils.MockClock{
NowTime: 1500000060,
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clock: clock,
}
err := ss.refreshManifest()
assert.Nil(t, err)
assert.Equal(t, 1, len(ss.manifest.Rules)) // u1 not added
assert.Equal(t, r2.ruleName, ss.manifest.Rules[0].ruleName)
// Assert on refreshedAt timestamp
assert.Equal(t, int64(1500000060), ss.manifest.refreshedAt)
}
// Assert that rules missing from GetSamplingRules are pruned
func TestRefreshManifestRuleRemoval(t *testing.T) {
resARN := "*"
serviceTye := ""
attributes := make(map[string]*string)
// Rule 'r1'
r1 := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{
ServiceName: "www.foo.com",
HTTPMethod: "POST",
URLPath: "/resource/bar",
FixedTarget: 50,
Rate: 0.05,
},
priority: 4,
resourceARN: resARN,
}
// Rule 'r2'
r2 := &CentralizedRule{
ruleName: "r2",
reservoir: &CentralizedReservoir{
quota: 20,
reservoir: &reservoir{
capacity: 60,
},
},
Properties: &Properties{
ServiceName: "www.fizz.com",
HTTPMethod: "PUT",
URLPath: "/resource/fizz",
FixedTarget: 60,
Rate: 0.04,
},
priority: 5,
resourceARN: resARN,
}
// Rule 'r3'
r3 := &CentralizedRule{
ruleName: "r3",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{
ServiceName: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 40,
Rate: 0.10,
},
priority: 8,
resourceARN: resARN,
}
// Sorted array
rules := []*CentralizedRule{r1, r2, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r2": r2,
"r3": r3,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
}
// Valid no-op update for rule 'r1'
name1 := "r1"
fixedRate1 := 0.05
httpMethod1 := "POST"
priority1 := int64(4)
reservoirSize1 := int64(50)
serviceName1 := "www.foo.com"
urlPath1 := "/resource/bar"
version1 := int64(1)
host1 := "h1"
u1 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name1,
ServiceName: &serviceName1,
URLPath: &urlPath1,
HTTPMethod: &httpMethod1,
Priority: &priority1,
ReservoirSize: &reservoirSize1,
FixedRate: &fixedRate1,
Version: &version1,
ResourceARN: &resARN,
Host: &host1,
ServiceType: &serviceTye,
Attributes: attributes,
},
}
// Rule 'r2' is missing from GetSamplingRules response
// Valid no-op update for rule 'r3'
name3 := "r3"
fixedRate3 := 0.10
httpMethod3 := "POST"
priority3 := int64(8)
reservoirSize3 := int64(40)
serviceName3 := "www.bar.com"
urlPath3 := "/resource/foo"
version3 := int64(1)
host3 := "h3"
u3 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name3,
ServiceName: &serviceName3,
URLPath: &urlPath3,
HTTPMethod: &httpMethod3,
Priority: &priority3,
ReservoirSize: &reservoirSize3,
FixedRate: &fixedRate3,
Version: &version3,
ResourceARN: &resARN,
Host: &host3,
ServiceType: &serviceTye,
Attributes: attributes,
},
}
// Mock proxy with updates u1 and u3
proxy := &mockProxy{
samplingRules: []*xraySvc.SamplingRuleRecord{u1, u3},
}
// Mock clock with time incremented to 60 seconds past current
// manifest refreshedAt timestamp.
clock := &utils.MockClock{
NowTime: 1500000060,
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clock: clock,
}
// Refresh manifest with updates from mock proxy
err := ss.refreshManifest()
assert.Nil(t, err)
// Assert on removal of rule
assert.Equal(t, 2, len(ss.manifest.Rules))
assert.Equal(t, 2, len(ss.manifest.Index))
// Assert on sorting order
assert.Equal(t, r1, ss.manifest.Rules[0])
assert.Equal(t, r3, ss.manifest.Rules[1])
// Assert on refreshedAt timestamp
assert.Equal(t, int64(1500000060), ss.manifest.refreshedAt)
}
// Assert that an invalid rule update does not update the rule
func TestRefreshManifestInvalidRuleUpdate(t *testing.T) {
resARN := "*"
serviceTye := ""
attributes := make(map[string]*string)
// Rule 'r1'
r1 := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{
ServiceName: "www.foo.com",
HTTPMethod: "POST",
URLPath: "/resource/bar",
FixedTarget: 50,
Rate: 0.05,
},
priority: 4,
resourceARN: resARN,
}
h3 := "h3"
// Rule 'r3'
r3 := &CentralizedRule{
ruleName: "r3",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{
ServiceName: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 40,
Rate: 0.10,
Host: h3,
},
priority: 8,
resourceARN: resARN,
}
// Sorted array
rules := []*CentralizedRule{r1, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r3": r3,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
}
// Invalid update for rule 'r1' (missing fixedRate)
name1 := "r1"
httpMethod1 := "GET"
priority1 := int64(9)
reservoirSize1 := int64(50)
serviceName1 := "www.foo.com"
urlPath1 := "/resource/bar"
version1 := int64(1)
u1 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name1,
ServiceName: &serviceName1,
URLPath: &urlPath1,
HTTPMethod: &httpMethod1,
Priority: &priority1,
ReservoirSize: &reservoirSize1,
Version: &version1,
ResourceARN: &resARN,
ServiceType: &serviceTye,
Attributes: attributes,
},
}
// Valid update for rule 'r3'
name3 := "r3"
fixedRate3 := 0.10
httpMethod3 := "POST"
priority3 := int64(8)
reservoirSize3 := int64(40)
serviceName3 := "www.bar.com"
urlPath3 := "/resource/foo"
version3 := int64(1)
u3 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name3,
ServiceName: &serviceName3,
URLPath: &urlPath3,
HTTPMethod: &httpMethod3,
Priority: &priority3,
ReservoirSize: &reservoirSize3,
FixedRate: &fixedRate3,
Version: &version3,
ResourceARN: &resARN,
Host: &h3,
ServiceType: &serviceTye,
Attributes: attributes,
},
}
// Mock proxy with updates u1 and u3
proxy := &mockProxy{
samplingRules: []*xraySvc.SamplingRuleRecord{u1, u3},
}
// Mock clock with time incremented to 60 seconds past current
// manifest refreshedAt timestamp.
clock := &utils.MockClock{
NowTime: 1500000060,
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clock: clock,
}
// Refresh manifest with updates from mock proxy
err := ss.refreshManifest()
assert.NotNil(t, err)
// Assert on size of manifest
assert.Equal(t, 1, len(ss.manifest.Rules))
assert.Equal(t, 1, len(ss.manifest.Index))
// Assert on sorting order
assert.Equal(t, r3, ss.manifest.Rules[0])
// Assert on index consistency
assert.Equal(t, r3, ss.manifest.Index["r3"])
// Assert on refreshedAt timestamp being updated
assert.Equal(t, int64(1500000060), ss.manifest.refreshedAt)
}
// Assert that a new invalid rule does not get added to manifest
func TestRefreshManifestInvalidNewRule(t *testing.T) {
resARN := "*"
h := "h"
serviceTye := ""
// Rule 'r1'
r1 := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 40,
},
},
Properties: &Properties{
ServiceName: "www.foo.com",
HTTPMethod: "POST",
URLPath: "/resource/bar",
FixedTarget: 40,
Rate: 0.05,
Host: h,
},
priority: 4,
resourceARN: resARN,
}
// Rule 'r3'
r3 := &CentralizedRule{
ruleName: "r3",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{
ServiceName: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 50,
Rate: 0.10,
Host: h,
},
priority: 8,
resourceARN: resARN,
}
// Sorted array
rules := []*CentralizedRule{r1, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r3": r3,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
}
// Valid no-op update for rule 'r1'
name1 := "r1"
fixedRate1 := 0.05
httpMethod1 := "POST"
priority1 := int64(4)
reservoirSize1 := int64(50)
serviceName1 := "www.foo.com"
urlPath1 := "/resource/bar"
version1 := int64(1)
u1 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name1,
ServiceName: &serviceName1,
URLPath: &urlPath1,
HTTPMethod: &httpMethod1,
Priority: &priority1,
ReservoirSize: &reservoirSize1,
FixedRate: &fixedRate1,
Version: &version1,
ResourceARN: &resARN,
Host: &h,
ServiceType: &serviceTye,
},
}
// New Invalid rule 'r2' (missing priority)
name2 := "r2"
fixedRate2 := 0.04
httpMethod2 := "PUT"
reservoirSize2 := int64(60)
serviceName2 := "www.fizz.com"
urlPath2 := "/resource/fizz"
version2 := int64(1)
u2 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name2,
ServiceName: &serviceName2,
URLPath: &urlPath2,
HTTPMethod: &httpMethod2,
ReservoirSize: &reservoirSize2,
FixedRate: &fixedRate2,
Version: &version2,
ResourceARN: &resARN,
},
}
// Valid no-op update for rule 'r3'
name3 := "r3"
fixedRate3 := 0.10
httpMethod3 := "POST"
priority3 := int64(8)
reservoirSize3 := int64(40)
serviceName3 := "www.bar.com"
urlPath3 := "/resource/foo"
version3 := int64(1)
u3 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name3,
ServiceName: &serviceName3,
URLPath: &urlPath3,
HTTPMethod: &httpMethod3,
Priority: &priority3,
ReservoirSize: &reservoirSize3,
FixedRate: &fixedRate3,
Version: &version3,
ResourceARN: &resARN,
Host: &h,
ServiceType: &serviceTye,
},
}
// New Invalid rule 'r4' (missing version)
name4 := "r4"
fixedRate4 := 0.04
httpMethod4 := "PUT"
priority4 := int64(8)
reservoirSize4 := int64(60)
serviceName4 := "www.fizz.com"
urlPath4 := "/resource/fizz"
u4 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name4,
ServiceName: &serviceName4,
URLPath: &urlPath4,
HTTPMethod: &httpMethod4,
Priority: &priority4,
ReservoirSize: &reservoirSize4,
FixedRate: &fixedRate4,
ResourceARN: &resARN,
},
}
// New Invalid rule 'r5' (invalid version)
name5 := "r5"
fixedRate5 := 0.04
httpMethod5 := "PUT"
priority5 := int64(8)
reservoirSize5 := int64(60)
serviceName5 := "www.fizz.com"
urlPath5 := "/resource/fizz"
version5 := int64(0)
u5 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name5,
ServiceName: &serviceName5,
URLPath: &urlPath5,
HTTPMethod: &httpMethod5,
Priority: &priority5,
ReservoirSize: &reservoirSize5,
FixedRate: &fixedRate5,
Version: &version5,
ResourceARN: &resARN,
},
}
// Mock proxy with updates u1 through u5
proxy := &mockProxy{
samplingRules: []*xraySvc.SamplingRuleRecord{u1, u2, u3, u4, u5},
}
// Mock clock with time incremented to 60 seconds past current
// manifest refreshedAt timestamp.
clock := &utils.MockClock{
NowTime: 1500000060,
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clock: clock,
}
// Refresh manifest with updates from mock proxy
err := ss.refreshManifest()
assert.NotNil(t, err)
// Assert on size of manifest
assert.Equal(t, 2, len(ss.manifest.Rules))
assert.Equal(t, 2, len(ss.manifest.Index))
// Assert on sorting order
assert.Equal(t, r1, ss.manifest.Rules[0])
assert.Equal(t, r3, ss.manifest.Rules[1])
// Assert on index consistency
assert.Equal(t, r1, ss.manifest.Index["r1"])
assert.Equal(t, r3, ss.manifest.Index["r3"])
// Assert on refreshedAt timestamp being updated
assert.Equal(t, int64(1500000060), ss.manifest.refreshedAt)
}
// Assert that a proxy error results in an early return
func TestRefreshManifestProxyError(t *testing.T) {
rules := []*CentralizedRule{}
index := map[string]*CentralizedRule{}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
}
// Mock proxy. Will return error.
proxy := &mockProxy{}
// Mock clock with time incremented to 60 seconds past current
// manifest refreshedAt timestamp.
clock := &utils.MockClock{
NowTime: 1500000060,
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clock: clock,
}
// Refresh manifest with updates from mock proxy
err := ss.refreshManifest()
assert.NotNil(t, err)
// Assert on size of manifest
assert.Equal(t, 0, len(ss.manifest.Rules))
assert.Equal(t, 0, len(ss.manifest.Index))
// Assert on refreshedAt timestamp not changing
assert.Equal(t, int64(1500000000), ss.manifest.refreshedAt)
}
// Assert that valid targets from proxy result in updated quotas for sampling rules
func TestRefreshTargets(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500000000,
}
// Rule 'r1'
r1 := &CentralizedRule{
ruleName: "r1",
requests: 100,
sampled: 6,
borrows: 0,
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 5,
reservoir: &reservoir{
capacity: 30,
},
expiresAt: 1500000050,
refreshedAt: 1499999990,
interval: 10,
},
Properties: &Properties{
Host: "www.foo.com",
HTTPMethod: "POST",
URLPath: "/resource/bar",
FixedTarget: 30,
Rate: 0.05,
},
priority: 4,
clock: clock,
}
// Rule 'r3'
r3 := &CentralizedRule{
ruleName: "r3",
requests: 50,
sampled: 50,
borrows: 0,
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
expiresAt: 1500000050,
refreshedAt: 1499999990,
interval: 10,
},
Properties: &Properties{
Host: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 50,
Rate: 0.10,
},
priority: 8,
clock: clock,
}
// Sorted array
rules := []*CentralizedRule{r1, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r3": r3,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1499999990,
}
// Sampling Target for 'r1'
rate1 := 0.07
quota1 := int64(3)
quotaTTL1 := time.Unix(1500000060, 0)
name1 := "r1"
t1 := &xraySvc.SamplingTargetDocument{
FixedRate: &rate1,
ReservoirQuota: &quota1,
ReservoirQuotaTTL: &quotaTTL1,
RuleName: &name1,
}
// Sampling Target for 'r3'
rate3 := 0.11
quota3 := int64(15)
quotaTTL3 := time.Unix(1500000060, 0)
name3 := "r3"
t3 := &xraySvc.SamplingTargetDocument{
FixedRate: &rate3,
ReservoirQuota: &quota3,
ReservoirQuotaTTL: &quotaTTL3,
RuleName: &name3,
}
// 'LastRuleModification' attribute
modifiedAt := time.Unix(1499999900, 0)
// Mock proxy with targets for 'r1' and 'r3'
proxy := &mockProxy{
samplingTargetOutput: &xraySvc.GetSamplingTargetsOutput{
LastRuleModification: &modifiedAt,
SamplingTargetDocuments: []*xraySvc.SamplingTargetDocument{
t1,
t3,
},
},
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clientID: "c1",
clock: clock,
}
// Expected state of 'r1' after refresh
expR1 := &CentralizedRule{
ruleName: "r1",
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 3,
refreshedAt: 1500000000,
reservoir: &reservoir{
capacity: 30,
},
expiresAt: 1500000060,
interval: 10,
},
Properties: &Properties{
Host: "www.foo.com",
HTTPMethod: "POST",
URLPath: "/resource/bar",
FixedTarget: 30,
Rate: 0.07,
},
priority: 4,
clock: clock,
}
// Expected state of 'r3' after refresh
expR3 := &CentralizedRule{
ruleName: "r3",
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 15,
refreshedAt: 1500000000,
reservoir: &reservoir{
capacity: 50,
},
expiresAt: 1500000060,
interval: 10,
},
Properties: &Properties{
Host: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 50,
Rate: 0.11,
},
priority: 8,
clock: clock,
}
err := ss.refreshTargets()
assert.Nil(t, err)
// Assert on size of manifest not changing
assert.Equal(t, 2, len(ss.manifest.Rules))
assert.Equal(t, 2, len(ss.manifest.Index))
// Assert on updated sampling rules
assert.Equal(t, expR1, ss.manifest.Index["r1"])
assert.Equal(t, expR3, ss.manifest.Index["r3"])
}
func TestRefreshTargetsVariableIntervals(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500000000,
}
// Rule 'r1'. Interval of 20 seconds
r1 := &CentralizedRule{
ruleName: "r1",
requests: 100,
sampled: 6,
borrows: 0,
usedAt: 1499999999,
reservoir: &CentralizedReservoir{
quota: 5,
reservoir: &reservoir{
capacity: 30,
},
expiresAt: 1500000100,
refreshedAt: 1499999990,
interval: 20,
},
Properties: &Properties{
Host: "www.foo.com",
HTTPMethod: "POST",
URLPath: "/resource/bar",
FixedTarget: 30,
Rate: 0.05,
},
priority: 4,
clock: clock,
}
// Rule 'r3'. Interval of 30 seconds.
r3 := &CentralizedRule{
ruleName: "r3",
requests: 50,
sampled: 50,
borrows: 0,
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
expiresAt: 1500000200,
refreshedAt: 1499999990,
interval: 30,
},
Properties: &Properties{
Host: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 50,
Rate: 0.10,
},
priority: 8,
clock: clock,
}
// Sorted array
rules := []*CentralizedRule{r1, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r3": r3,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1499999990,
}
// Sampling Target for 'r1'
rate1 := 0.07
quota1 := int64(3)
quotaTTL1 := time.Unix(1500000060, 0)
name1 := "r1"
t1 := &xraySvc.SamplingTargetDocument{
FixedRate: &rate1,
ReservoirQuota: &quota1,
ReservoirQuotaTTL: &quotaTTL1,
RuleName: &name1,
}
// Sampling Target for 'r3'
rate3 := 0.11
quota3 := int64(15)
quotaTTL3 := time.Unix(1500000060, 0)
name3 := "r3"
t3 := &xraySvc.SamplingTargetDocument{
FixedRate: &rate3,
ReservoirQuota: &quota3,
ReservoirQuotaTTL: &quotaTTL3,
RuleName: &name3,
}
// 'LastRuleModification' attribute
modifiedAt := time.Unix(1499999900, 0)
// Mock proxy with targets for 'r1' and 'r3'
proxy := &mockProxy{
samplingTargetOutput: &xraySvc.GetSamplingTargetsOutput{
LastRuleModification: &modifiedAt,
SamplingTargetDocuments: []*xraySvc.SamplingTargetDocument{
t1,
t3,
},
},
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clientID: "c1",
clock: clock,
}
// No targets should be refreshed
err := ss.refreshTargets()
assert.Nil(t, err)
assert.Equal(t, r1, ss.manifest.Index["r1"])
assert.Equal(t, r3, ss.manifest.Index["r3"])
// Increment time to 1500000010
clock.Increment(10, 0)
// Expected state of 'r1' after refresh
expR1 := &CentralizedRule{
ruleName: "r1",
usedAt: 1499999999,
reservoir: &CentralizedReservoir{
quota: 3,
refreshedAt: 1500000010,
reservoir: &reservoir{
capacity: 30,
},
expiresAt: 1500000060,
interval: 20,
},
Properties: &Properties{
Host: "www.foo.com",
HTTPMethod: "POST",
URLPath: "/resource/bar",
FixedTarget: 30,
Rate: 0.07,
},
priority: 4,
clock: clock,
}
// Expected state of 'r3' after refresh
expR3 := &CentralizedRule{
ruleName: "r3",
requests: 50,
sampled: 50,
borrows: 0,
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
expiresAt: 1500000200,
refreshedAt: 1499999990,
interval: 30,
},
Properties: &Properties{
Host: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 50,
Rate: 0.10,
},
priority: 8,
clock: clock,
}
// Only r1 should be refreshed
err = ss.refreshTargets()
assert.Nil(t, err)
assert.Equal(t, expR1, ss.manifest.Index["r1"])
assert.Equal(t, expR3, ss.manifest.Index["r3"])
// Increment time to 1500000020
clock.Increment(10, 0)
// Expected state of 'r3' after refresh
expR3 = &CentralizedRule{
ruleName: "r3",
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 15,
refreshedAt: 1500000020,
reservoir: &reservoir{
capacity: 50,
},
expiresAt: 1500000060,
interval: 30,
},
Properties: &Properties{
Host: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 50,
Rate: 0.11,
},
priority: 8,
clock: clock,
}
// r3 should be refreshed
err = ss.refreshTargets()
assert.Nil(t, err)
// Assert on size of manifest not changing
assert.Equal(t, 2, len(ss.manifest.Rules))
assert.Equal(t, 2, len(ss.manifest.Index))
// Assert on updated sampling rules
assert.Equal(t, expR1, ss.manifest.Index["r1"])
assert.Equal(t, expR3, ss.manifest.Index["r3"])
}
// Assert that an invalid sampling target does not leave the manifest in an
// inconsistent state. The invalid sampling target should be ignored.
func TestRefreshTargetsInvalidTarget(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500000000,
}
// Rule 'r1'
r1 := &CentralizedRule{
ruleName: "r1",
requests: 100,
sampled: 6,
borrows: 0,
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 5,
reservoir: &reservoir{
capacity: 30,
},
interval: 10,
expiresAt: 1500000050,
},
Properties: &Properties{
Host: "www.foo.com",
HTTPMethod: "POST",
URLPath: "/resource/bar",
FixedTarget: 30,
Rate: 0.05,
},
priority: 4,
clock: clock,
}
// Rule 'r3'
r3 := &CentralizedRule{
ruleName: "r3",
requests: 50,
sampled: 50,
borrows: 0,
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
expiresAt: 1500000050,
interval: 10,
},
Properties: &Properties{
Host: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 50,
Rate: 0.10,
},
priority: 8,
clock: clock,
}
// Sorted array
rules := []*CentralizedRule{r1, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r3": r3,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1499999990,
}
// Invalid sampling Target for 'r1' (missing fixed rate)
quota1 := int64(3)
quotaTTL1 := time.Unix(1500000060, 0)
name1 := "r1"
t1 := &xraySvc.SamplingTargetDocument{
RuleName: &name1,
ReservoirQuota: &quota1,
ReservoirQuotaTTL: &quotaTTL1,
}
// Valid sampling Target for 'r3'
rate3 := 0.11
quota3 := int64(15)
quotaTTL3 := time.Unix(1500000060, 0)
name3 := "r3"
t3 := &xraySvc.SamplingTargetDocument{
FixedRate: &rate3,
ReservoirQuota: &quota3,
ReservoirQuotaTTL: &quotaTTL3,
RuleName: &name3,
}
// 'LastRuleModification' attribute
modifiedAt := time.Unix(1499999900, 0)
// Mock proxy with targets for 'r1' and 'r3'
proxy := &mockProxy{
samplingTargetOutput: &xraySvc.GetSamplingTargetsOutput{
LastRuleModification: &modifiedAt,
SamplingTargetDocuments: []*xraySvc.SamplingTargetDocument{
t1,
t3,
},
},
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clientID: "c1",
clock: clock,
}
// Expected state of 'r1' after refresh.
// Unchanged except for reset counters.
expR1 := &CentralizedRule{
ruleName: "r1",
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 5,
reservoir: &reservoir{
capacity: 30,
},
expiresAt: 1500000050,
interval: 10,
},
Properties: &Properties{
Host: "www.foo.com",
HTTPMethod: "POST",
URLPath: "/resource/bar",
FixedTarget: 30,
Rate: 0.05,
},
priority: 4,
clock: clock,
}
// Expected state of 'r3' after refresh
expR3 := &CentralizedRule{
ruleName: "r3",
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 15,
refreshedAt: 1500000000,
reservoir: &reservoir{
capacity: 50,
},
expiresAt: 1500000060,
interval: 10,
},
Properties: &Properties{
Host: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 50,
Rate: 0.11,
},
priority: 8,
clock: clock,
}
err := ss.refreshTargets()
assert.NotNil(t, err)
// Assert on size of manifest not changing
assert.Equal(t, 2, len(ss.manifest.Rules))
assert.Equal(t, 2, len(ss.manifest.Index))
// Assert on updated sampling rules
assert.Equal(t, expR1, ss.manifest.Index["r1"])
assert.Equal(t, expR3, ss.manifest.Index["r3"])
}
// Assert that target refresh triggers a manifest refresh if `LastRuleModification`
// attribute is greater than the manifest's refreshedAt attribute
func TestRefreshTargetsOutdatedManifest(t *testing.T) {
serviceType := ""
resARN := "*"
clock := &utils.MockClock{
NowTime: 1500000000,
}
// Existing Rule 'r3'
r3 := &CentralizedRule{
ruleName: "r3",
requests: 50,
sampled: 50,
borrows: 0,
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
expiresAt: 1500000050,
interval: 10,
},
Properties: &Properties{
ServiceName: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 50,
Rate: 0.10,
Host: "www.bar.com",
},
priority: 8,
clock: clock,
resourceARN: resARN,
}
// Sorted array
rules := []*CentralizedRule{r3}
index := map[string]*CentralizedRule{
"r3": r3,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1499999800,
}
// Valid sampling Target for 'r3'
rate3 := 0.11
quota3 := int64(15)
quotaTTL3 := time.Unix(1500000060, 0)
name3 := "r3"
t3 := &xraySvc.SamplingTargetDocument{
FixedRate: &rate3,
ReservoirQuota: &quota3,
ReservoirQuotaTTL: &quotaTTL3,
RuleName: &name3,
}
// New rule 'r1'
name := "r1"
fixedRate := 0.05
httpMethod := "POST"
priority := int64(4)
reservoirSize := int64(50)
serviceName := "www.foo.com"
urlPath := "/resource/bar"
version := int64(1)
newRule := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name,
ServiceName: &serviceName,
URLPath: &urlPath,
HTTPMethod: &httpMethod,
Priority: &priority,
ReservoirSize: &reservoirSize,
FixedRate: &fixedRate,
Version: &version,
Host: &serviceName,
ServiceType: &serviceType,
ResourceARN: &resARN,
},
}
// 'LastRuleModification' attribute
modifiedAt := time.Unix(1499999900, 0)
// Mock proxy with `LastRuleModification` attribute and sampling rules
proxy := &mockProxy{
samplingTargetOutput: &xraySvc.GetSamplingTargetsOutput{
LastRuleModification: &modifiedAt,
SamplingTargetDocuments: []*xraySvc.SamplingTargetDocument{t3},
},
samplingRules: []*xraySvc.SamplingRuleRecord{newRule},
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clientID: "c1",
clock: clock,
}
err := ss.refreshTargets()
assert.Nil(t, err)
timer := time.NewTimer(1 * time.Second)
// Assert that manifest is refreshed. The refresh is async so we timeout
// after one second.
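// The labeled loop below repeatedly polls the manifest under its lock until
// the asynchronous refresh is observed, and uses `break A` to exit both the
// select and the loop once the timer fires or the expected state is reached.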
A:
for {
select {
case <-timer.C:
assert.Fail(t, "Timed out waiting for async manifest refresh")
break A
default:
// Assert that rule was added to manifest and the timestamp refreshed
ss.manifest.mu.Lock()
if len(ss.manifest.Rules) == 1 &&
len(ss.manifest.Index) == 1 &&
ss.manifest.refreshedAt == 1500000000 {
ss.manifest.mu.Unlock()
break A
}
ss.manifest.mu.Unlock()
}
}
}
// Assert that a proxy error results in an early return with the manifest unchanged.
func TestRefreshTargetsProxyError(t *testing.T) {
clock := &utils.MockClock{
NowTime: 1500000000,
}
// Existing Rule 'r3'
r3 := &CentralizedRule{
ruleName: "r3",
requests: 50,
sampled: 50,
borrows: 0,
usedAt: 1500000000,
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
expiresAt: 1500000050,
},
Properties: &Properties{
Host: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 50,
Rate: 0.10,
},
priority: 8,
clock: clock,
}
// Sorted array
rules := []*CentralizedRule{r3}
index := map[string]*CentralizedRule{
"r3": r3,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1499999800,
}
// Mock proxy. Will return error.
proxy := &mockProxy{}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clientID: "c1",
clock: clock,
}
err := ss.refreshTargets()
assert.NotNil(t, err)
// Assert on size of manifest not changing
assert.Equal(t, 1, len(ss.manifest.Rules))
assert.Equal(t, 1, len(ss.manifest.Index))
}
func TestLoadDaemonEndpoints1(t *testing.T) {
host1 := "www.foo.com"
method1 := "POST"
url1 := "/resource/bar"
serviceName1 := "localhost"
servType1 := "AWS::EC2::Instance"
sr := &Request{
Host: host1,
URL: url1,
Method: method1,
ServiceName: serviceName1,
ServiceType: servType1,
}
s, _ := NewCentralizedStrategy()
d, _ := daemoncfg.GetDaemonEndpointsFromString("127.0.0.0:3000")
s.LoadDaemonEndpoints(d)
// Make positive sampling decision against 'r1'
s.ShouldTrace(sr)
assert.Equal(t, d, s.daemonEndpoints)
}
func TestLoadDaemonEndpoints2(t *testing.T) {
host1 := "www.foo.com"
method1 := "POST"
url1 := "/resource/bar"
serviceName1 := "localhost"
servType1 := "AWS::EC2::Instance"
sr := &Request{
Host: host1,
URL: url1,
Method: method1,
ServiceName: serviceName1,
ServiceType: servType1,
}
s, _ := NewCentralizedStrategy()
s.LoadDaemonEndpoints(nil) // when nil, the proxy falls back to the env variable or the default daemon endpoint
// Make positive sampling decision against 'r1'
s.ShouldTrace(sr)
assert.Nil(t, s.daemonEndpoints)
}
// Benchmarks
func BenchmarkCentralizedStrategy_ShouldTrace(b *testing.B) {
s, _ := NewCentralizedStrategy()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
s.ShouldTrace(&Request{})
}
})
}
func BenchmarkNewCentralizedStrategy_refreshManifest(b *testing.B) {
serviceTye := ""
resourceARN := "*"
// Valid no-op update for rule 'r1'
name1 := "r1"
fixedRate1 := 0.05
httpMethod1 := "POST"
priority1 := int64(4)
reservoirSize1 := int64(50)
serviceName1 := "www.foo.com"
urlPath1 := "/resource/bar"
version1 := int64(1)
u1 := &xraySvc.SamplingRuleRecord{
SamplingRule: &xraySvc.SamplingRule{
RuleName: &name1,
ServiceName: &serviceName1,
URLPath: &urlPath1,
HTTPMethod: &httpMethod1,
Priority: &priority1,
ReservoirSize: &reservoirSize1,
FixedRate: &fixedRate1,
Version: &version1,
Host: &serviceName1,
ServiceType: &serviceTye,
ResourceARN: &resourceARN,
},
}
// Rule 'r1'
r1 := &CentralizedRule{
ruleName: "r1",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{},
priority: 4,
resourceARN: resourceARN,
}
// Rule 'r3'
r3 := &CentralizedRule{
ruleName: "r3",
reservoir: &CentralizedReservoir{
quota: 10,
reservoir: &reservoir{
capacity: 50,
},
},
Properties: &Properties{
Host: "www.bar.com",
HTTPMethod: "POST",
URLPath: "/resource/foo",
FixedTarget: 40,
Rate: 0.10,
ServiceName: "www.bar.com",
},
priority: 8,
resourceARN: resourceARN,
}
// Sorted array
rules := []*CentralizedRule{r1, r3}
index := map[string]*CentralizedRule{
"r1": r1,
"r3": r3,
}
manifest := &CentralizedManifest{
Rules: rules,
Index: index,
refreshedAt: 1500000000,
}
// Mock proxy with update u1
proxy := &mockProxy{
samplingRules: []*xraySvc.SamplingRuleRecord{u1},
}
// Mock clock with time incremented to 60 seconds past current
// manifest refreshedAt timestamp.
clock := &utils.MockClock{
NowTime: 1500000060,
}
ss := &CentralizedStrategy{
manifest: manifest,
proxy: proxy,
clock: clock,
}
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
err := ss.refreshManifest()
if err != nil {
return
}
}
})
}
func BenchmarkCentralizedStrategy_refreshTargets(b *testing.B) {
s, _ := NewCentralizedStrategy()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
err := s.refreshTargets()
if err != nil {
return
}
}
})
}
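// Illustrative invocation (not part of the original source): the benchmarks
// above can typically be run from the package directory with the standard Go
// tooling, e.g.
//
//	go test -run=NONE -bench=CentralizedStrategy -benchmem
//
// The exact package path depends on the checkout layout.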
| 2,766 |
aws-xray-sdk-go | aws | Go | // Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package sampling
import (
"github.com/aws/aws-xray-sdk-go/internal/logger"
"github.com/aws/aws-xray-sdk-go/resources"
)
// LocalizedStrategy makes trace sampling decisions based on
// a set of rules provided in a local JSON file. Trace sampling
// decisions are made by the root node in the trace. If a
// sampling decision is made by the root service, it will be passed
// to downstream services through the trace header.
type LocalizedStrategy struct {
manifest *RuleManifest
}
// NewLocalizedStrategy initializes an instance of LocalizedStrategy
// with the default trace sampling rules. The default rules sample
// the first request per second, and 5% of requests thereafter.
func NewLocalizedStrategy() (*LocalizedStrategy, error) {
bytes, err := resources.Asset("resources/DefaultSamplingRules.json")
if err != nil {
return nil, err
}
manifest, err := ManifestFromJSONBytes(bytes)
if err != nil {
return nil, err
}
return &LocalizedStrategy{manifest: manifest}, nil
}
// NewLocalizedStrategyFromFilePath initializes an instance of
// LocalizedStrategy using a custom ruleset found at the filepath fp.
func NewLocalizedStrategyFromFilePath(fp string) (*LocalizedStrategy, error) {
manifest, err := ManifestFromFilePath(fp)
if err != nil {
return nil, err
}
return &LocalizedStrategy{manifest: manifest}, nil
}
// NewLocalizedStrategyFromJSONBytes initializes an instance of
// LocalizedStrategy using a custom ruleset provided in the json bytes b.
func NewLocalizedStrategyFromJSONBytes(b []byte) (*LocalizedStrategy, error) {
manifest, err := ManifestFromJSONBytes(b)
if err != nil {
return nil, err
}
return &LocalizedStrategy{manifest: manifest}, nil
}
// ShouldTrace consults the LocalizedStrategy's rule set to determine
// if the given request should be traced or not.
func (lss *LocalizedStrategy) ShouldTrace(rq *Request) *Decision {
logger.Debugf("Determining ShouldTrace decision for:\n\thost: %s\n\tpath: %s\n\tmethod: %s", rq.Host, rq.URL, rq.Method)
if nil != lss.manifest.Rules {
for _, r := range lss.manifest.Rules {
if r.AppliesTo(rq.Host, rq.URL, rq.Method) {
logger.Debugf("Applicable rule:\n\tfixed_target: %d\n\trate: %f\n\thost: %s\n\turl_path: %s\n\thttp_method: %s", r.FixedTarget, r.Rate, r.Host, r.URLPath, r.HTTPMethod)
return r.Sample()
}
}
}
logger.Debugf("Default rule applies:\n\tfixed_target: %d\n\trate: %f", lss.manifest.Default.FixedTarget, lss.manifest.Default.Rate)
return lss.manifest.Default.Sample()
}
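// The sketch below is illustrative only and not part of the original source.
// It shows one way a LocalizedStrategy built from in-memory JSON bytes might
// be queried. The ruleset literal assumes the version 2 local sampling rule
// document format; the field names ("host", "fixed_target", "rate", etc.) and
// the sample values are assumptions made for demonstration purposes.
func sketchLocalizedStrategyUsage() (*Decision, error) {
	ruleset := []byte(`{
	  "version": 2,
	  "rules": [
	    {
	      "description": "Checkout traffic",
	      "host": "www.foo.com",
	      "http_method": "POST",
	      "url_path": "/resource/*",
	      "fixed_target": 10,
	      "rate": 0.05
	    }
	  ],
	  "default": {
	    "fixed_target": 1,
	    "rate": 0.05
	  }
	}`)
	strategy, err := NewLocalizedStrategyFromJSONBytes(ruleset)
	if err != nil {
		return nil, err
	}
	// ShouldTrace matches the request against the rules in order and falls
	// back to the default rule when no rule applies.
	return strategy.ShouldTrace(&Request{
		Host:   "www.foo.com",
		URL:    "/resource/bar",
		Method: "POST",
	}), nil
}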
| 75 |