repo_name | repo_creator | programming_language | code | num_lines
---|---|---|---|---|
eks-anywhere | aws | Go | package upgradevalidations
import (
"github.com/aws/eks-anywhere/pkg/validations"
)
func New(opts *validations.Opts) *UpgradeValidations {
opts.SetDefaults()
return &UpgradeValidations{Opts: opts}
}
type UpgradeValidations struct {
Opts *validations.Opts
}
| 15 |
eks-anywhere | aws | Go | package upgradevalidations
import (
"context"
"fmt"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
// ValidateServerVersionSkew validates Kubernetes version skew between upgrades for the CLI.
func ValidateServerVersionSkew(ctx context.Context, newCluster *anywherev1.Cluster, cluster *types.Cluster, mgmtCluster *types.Cluster, kubectl validations.KubectlClient) error {
managementCluster := cluster
if !cluster.ExistingManagement {
managementCluster = mgmtCluster
}
eksaCluster, err := kubectl.GetEksaCluster(ctx, managementCluster, newCluster.Name)
if err != nil {
return fmt.Errorf("fetching old cluster: %v", err)
}
return anywherev1.ValidateKubernetesVersionSkew(newCluster, eksaCluster).ToAggregate()
}
| 26 |
eks-anywhere | aws | Go | package upgradevalidations_test
import (
"context"
"fmt"
"strings"
"testing"
"github.com/golang/mock/gomock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
"github.com/aws/eks-anywhere/pkg/validations/mocks"
"github.com/aws/eks-anywhere/pkg/validations/upgradevalidations"
)
func TestValidateVersionSkew(t *testing.T) {
tests := []struct {
name string
wantErr error
upgradeVersion anywherev1.KubernetesVersion
oldVersion anywherev1.KubernetesVersion
}{
{
name: "FailureTwoMinorVersions",
wantErr: fmt.Errorf("only +1 minor version skew is supported"),
upgradeVersion: anywherev1.Kube120,
oldVersion: anywherev1.Kube118,
},
{
name: "FailureMinusOneMinorVersion",
wantErr: fmt.Errorf("kubernetes version downgrade is not supported (%s) -> (%s)", anywherev1.Kube120, anywherev1.Kube119),
upgradeVersion: anywherev1.Kube119,
oldVersion: anywherev1.Kube120,
},
{
name: "SuccessSameVersion",
wantErr: nil,
upgradeVersion: anywherev1.Kube119,
oldVersion: anywherev1.Kube119,
},
{
name: "SuccessOneMinorVersion",
wantErr: nil,
upgradeVersion: anywherev1.Kube120,
oldVersion: anywherev1.Kube119,
},
}
mockCtrl := gomock.NewController(t)
k := mocks.NewMockKubectlClient(mockCtrl)
ctx := context.Background()
for _, tc := range tests {
t.Run(tc.name, func(tt *testing.T) {
newCluster := baseCluster()
newCluster.Spec.KubernetesVersion = tc.upgradeVersion
oldCluster := baseCluster()
oldCluster.Spec.KubernetesVersion = tc.oldVersion
cluster := &types.Cluster{KubeconfigFile: "test.kubeconfig"}
k.EXPECT().GetEksaCluster(ctx, cluster, newCluster.Name).Return(oldCluster, nil)
err := upgradevalidations.ValidateServerVersionSkew(ctx, newCluster, cluster, cluster, k)
if err != nil && !strings.Contains(err.Error(), tc.wantErr.Error()) {
t.Errorf("%v got = %v, \nwant %v", tc.name, err, tc.wantErr)
}
})
}
}
func baseCluster() *anywherev1.Cluster {
c := &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "mgmt",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: anywherev1.Kube121,
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Count: 3,
Endpoint: &anywherev1.Endpoint{
Host: "1.1.1.1",
},
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
},
BundlesRef: &anywherev1.BundlesRef{
Name: "bundles-1",
Namespace: constants.EksaSystemNamespace,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(1),
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}},
ClusterNetwork: anywherev1.ClusterNetwork{
CNIConfig: &anywherev1.CNIConfig{Cilium: &anywherev1.CiliumConfig{}},
Pods: anywherev1.Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: anywherev1.Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
},
DatacenterRef: anywherev1.Ref{
Kind: anywherev1.VSphereDatacenterKind,
Name: "eksa-unit-test",
},
},
}
return c
}
| 129 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/validations/upgradevalidations/upgradevalidations.go
// Package mocks is a generated GoMock package.
package mocks
| 6 |
eks-anywhere | aws | Go | package version
var gitVersion string
type Info struct {
GitVersion string
}
func Get() Info {
return Info{
GitVersion: gitVersion,
}
}
| 14 |
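Shown below is a hedged sketch of how this package is typically consumed; the -X linker flag in the comment assumes the module path github.com/aws/eks-anywhere seen in the imports elsewhere in this dump.

package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/version"
)

func main() {
	// gitVersion is unexported, so it is normally injected at build time, e.g.:
	//   go build -ldflags "-X github.com/aws/eks-anywhere/pkg/version.gitVersion=v0.0.0-dev"
	// (module path assumed; adjust to the real one). Without injection this prints an empty string.
	fmt.Println("GitVersion:", version.Get().GitVersion)
}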
eks-anywhere | aws | Go | package workflow
import (
"context"
"fmt"
)
// ErrorHandler is a function called when a workflow experiences an error during execution. The
// error may originate from hook execution or from a task.
type ErrorHandler func(context.Context, error)
func nopErrorHandler(context.Context, error) {}
// ErrDuplicateTaskName indicates 2 tasks with the same TaskName have been added to a workflow.
type ErrDuplicateTaskName struct {
Name TaskName
}
func (e ErrDuplicateTaskName) Error() string {
return fmt.Sprintf("duplicate task name: %v", e.Name)
}
| 22 |
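A short sketch, not taken from the repository, of how a caller might supply an ErrorHandler and detect ErrDuplicateTaskName with errors.As; the task name and log messages are illustrative.

package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/eks-anywhere/pkg/workflow"
)

func main() {
	// Configure a workflow with a custom error handler; the handler only observes
	// the error, Execute still returns it to the caller.
	wflw := workflow.New(workflow.Config{
		ErrorHandler: func(_ context.Context, err error) {
			log.Printf("workflow failed: %v", err)
		},
	})

	noop := workflow.TaskFunc(func(ctx context.Context) (context.Context, error) { return ctx, nil })

	if err := wflw.AppendTask("example", noop); err != nil {
		log.Fatal(err)
	}

	// Appending a second task with the same name surfaces ErrDuplicateTaskName.
	var dup workflow.ErrDuplicateTaskName
	if err := wflw.AppendTask("example", noop); errors.As(err, &dup) {
		log.Printf("duplicate task name: %v", dup.Name)
	}
}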
eks-anywhere | aws | Go | package workflow
// HookBinder is used by hook registrars to bind tasks to be executed among the workflow's
// core task set.
type HookBinder interface {
// BindPreWorkflowHook binds a task to a workflow that is run _before_ a workflow is executed.
BindPreWorkflowHook(Task)
// BindPostWorkflowHook binds a task to a workflow that is run _after_ a workflow is executed.
BindPostWorkflowHook(Task)
// BindPreTaskHook binds a task to be run _before_ the anchor task is executed.
BindPreTaskHook(anchor TaskName, task Task)
// BindPostTaskHook binds a task to be run _after_ the anchor task is executed.
BindPostTaskHook(anchor TaskName, task Task)
}
| 18 |
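The interface above is what hook registrars receive; the following is an illustrative registrar (the type name, package name, and log lines are invented) that binds a pre-task hook and a post-workflow hook using TaskFunc.

package hooks

import (
	"context"
	"log"

	"github.com/aws/eks-anywhere/pkg/workflow"
)

// diagnosticsRegistrar is a hypothetical hook registrar that logs around a
// named task and after the whole workflow.
type diagnosticsRegistrar struct {
	anchor workflow.TaskName
}

// Register binds the hooks onto any workflow exposed through HookBinder.
func (r diagnosticsRegistrar) Register(b workflow.HookBinder) {
	b.BindPreTaskHook(r.anchor, workflow.TaskFunc(func(ctx context.Context) (context.Context, error) {
		log.Printf("about to run %s", r.anchor)
		return ctx, nil
	}))
	b.BindPostWorkflowHook(workflow.TaskFunc(func(ctx context.Context) (context.Context, error) {
		log.Println("workflow finished")
		return ctx, nil
	}))
}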
eks-anywhere | aws | Go | package workflow
import "context"
// TaskName uniquely identifies a task within a given workflow.
type TaskName string
// Task represents an individual step within a workflow that can be run.
type Task interface {
// RunTask executes the task. Tasks may return a context that should be used in subsequent task
// execution.
RunTask(context.Context) (context.Context, error)
}
// TaskFunc is a helper for defining inline tasks. It is used by type converting a function to
// TaskFunc.
//
// Example:
//
// workflow.TaskFunc(func(ctx context.Context) (context.Context, error) {
// return ctx, nil
// })
type TaskFunc func(context.Context) (context.Context, error)
// RunTask satisfies the Task interface.
func (fn TaskFunc) RunTask(ctx context.Context) (context.Context, error) {
return fn(ctx)
}
// namedTask associates a name with a Task in the context of a Workflow to enable hook lookup.
type namedTask struct {
Task
Name TaskName
}
| 35 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/workflow/task.go
// Package workflow_test is a generated GoMock package.
package workflow_test
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockTask is a mock of Task interface.
type MockTask struct {
ctrl *gomock.Controller
recorder *MockTaskMockRecorder
}
// MockTaskMockRecorder is the mock recorder for MockTask.
type MockTaskMockRecorder struct {
mock *MockTask
}
// NewMockTask creates a new mock instance.
func NewMockTask(ctrl *gomock.Controller) *MockTask {
mock := &MockTask{ctrl: ctrl}
mock.recorder = &MockTaskMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockTask) EXPECT() *MockTaskMockRecorder {
return m.recorder
}
// RunTask mocks base method.
func (m *MockTask) RunTask(arg0 context.Context) (context.Context, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RunTask", arg0)
ret0, _ := ret[0].(context.Context)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RunTask indicates an expected call of RunTask.
func (mr *MockTaskMockRecorder) RunTask(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunTask", reflect.TypeOf((*MockTask)(nil).RunTask), arg0)
}
| 51 |
eks-anywhere | aws | Go | package workflow
import (
"context"
)
// Config is the configuration for constructing a Workflow instance.
type Config struct {
// ErrorHandler is the handler called when a workflow experiences an error. The error may originate
// from a hook or from a task. The original error is always returned from the workflow's Execute.
// Optional. Defaults to a no-op handler.
ErrorHandler ErrorHandler
}
// Workflow defines an abstract workflow that can execute a serialized set of tasks.
type Workflow struct {
Config
// tasks are the tasks to be run as part of the core workflow.
tasks []namedTask
// taskNames is a map of tasks added with AppendTask. It's used to ensure task names are unique
// so hooks aren't accidentally overwritten.
taskNames map[TaskName]struct{}
preWorkflowHooks []Task
postWorkflowHooks []Task
preTaskHooks map[TaskName][]Task
postTaskHooks map[TaskName][]Task
}
// New initializes a Workflow instance without any tasks or hooks.
func New(cfg Config) *Workflow {
if cfg.ErrorHandler == nil {
cfg.ErrorHandler = nopErrorHandler
}
wflw := &Workflow{
Config: cfg,
taskNames: make(map[TaskName]struct{}),
preTaskHooks: make(map[TaskName][]Task),
postTaskHooks: make(map[TaskName][]Task),
}
return wflw
}
// AppendTask appends t to the list of workflow tasks. Task names must be unique within a workflow.
// Duplicate names will receive an ErrDuplicateTaskName.
func (w *Workflow) AppendTask(name TaskName, t Task) error {
if _, found := w.taskNames[name]; found {
return ErrDuplicateTaskName{name}
}
w.tasks = append(w.tasks, namedTask{Task: t, Name: name})
w.taskNames[name] = struct{}{}
return nil
}
// Execute executes the workflow running any pre and post hooks registered for each task.
func (w *Workflow) Execute(ctx context.Context) error {
var err error
if ctx, err = runHooks(ctx, w.preWorkflowHooks); err != nil {
return w.handleError(ctx, err)
}
for _, task := range w.tasks {
if ctx, err = w.runPreTaskHooks(ctx, task.Name); err != nil {
return w.handleError(ctx, err)
}
if ctx, err = task.RunTask(ctx); err != nil {
return w.handleError(ctx, err)
}
if ctx, err = w.runPostTaskHooks(ctx, task.Name); err != nil {
return w.handleError(ctx, err)
}
}
if ctx, err = runHooks(ctx, w.postWorkflowHooks); err != nil {
return w.handleError(ctx, err)
}
return nil
}
// BindPreWorkflowHook implements the HookBinder interface.
func (w *Workflow) BindPreWorkflowHook(t Task) {
w.preWorkflowHooks = append(w.preWorkflowHooks, t)
}
// BindPostWorkflowHook implements the HookBinder interface.
func (w *Workflow) BindPostWorkflowHook(t Task) {
w.postWorkflowHooks = append(w.postWorkflowHooks, t)
}
// BindPreTaskHook implements the HookBinder interface.
func (w *Workflow) BindPreTaskHook(id TaskName, t Task) {
hooks := w.preTaskHooks[id]
hooks = append(hooks, t)
w.preTaskHooks[id] = hooks
}
// runPreTaskHooks executes all pre-task hooks registered against the given task name in the order they were registered.
func (w *Workflow) runPreTaskHooks(ctx context.Context, id TaskName) (context.Context, error) {
if hooks, ok := w.preTaskHooks[id]; ok {
return runHooks(ctx, hooks)
}
return ctx, nil
}
// BindPostTaskHook implements the HookBinder interface.
func (w *Workflow) BindPostTaskHook(id TaskName, t Task) {
hooks := w.postTaskHooks[id]
hooks = append(hooks, t)
w.postTaskHooks[id] = hooks
}
// runPostTaskHooks executes all post-task hooks registered against the given task name in the order they were registered.
func (w *Workflow) runPostTaskHooks(ctx context.Context, id TaskName) (context.Context, error) {
if hooks, ok := w.postTaskHooks[id]; ok {
return runHooks(ctx, hooks)
}
return ctx, nil
}
func runHooks(ctx context.Context, hooks []Task) (context.Context, error) {
var err error
for _, hook := range hooks {
if ctx, err = hook.RunTask(ctx); err != nil {
return ctx, err
}
}
return ctx, nil
}
func (w *Workflow) handleError(ctx context.Context, err error) error {
w.ErrorHandler(ctx, err)
return err
}
| 142 |
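A compact, hypothetical end-to-end use of the API above: build a Workflow, append two TaskFunc tasks, bind a pre-task hook to the second, and Execute. The task names, output, and must helper are invented for the example.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/eks-anywhere/pkg/workflow"
)

const (
	prepare workflow.TaskName = "Prepare"
	apply   workflow.TaskName = "Apply"
)

func main() {
	wflw := workflow.New(workflow.Config{})

	// Core tasks run in the order they are appended.
	must(wflw.AppendTask(prepare, workflow.TaskFunc(func(ctx context.Context) (context.Context, error) {
		fmt.Println("preparing")
		return ctx, nil
	})))
	must(wflw.AppendTask(apply, workflow.TaskFunc(func(ctx context.Context) (context.Context, error) {
		fmt.Println("applying")
		return ctx, nil
	})))

	// Hooks wrap the anchor task; this one runs just before "Apply".
	wflw.BindPreTaskHook(apply, workflow.TaskFunc(func(ctx context.Context) (context.Context, error) {
		fmt.Println("about to apply")
		return ctx, nil
	}))

	if err := wflw.Execute(context.Background()); err != nil {
		log.Fatal(err)
	}
}

func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}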
eks-anywhere | aws | Go | package workflow_test
import (
"context"
"errors"
"testing"
gomock "github.com/golang/mock/gomock"
"github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/workflow"
)
func TestWorkflowExecute(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
task1 := NewMockTask(ctrl)
runTask1 := task1.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
task2 := NewMockTask(ctrl)
runTask2 := task2.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
gomock.InOrder(runTask1, runTask2)
wflw := workflow.New(workflow.Config{})
g.Expect(wflw).ToNot(gomega.BeNil())
err := wflw.AppendTask("task1", task1)
g.Expect(err).ToNot(gomega.HaveOccurred())
err = wflw.AppendTask("task2", task2)
g.Expect(err).ToNot(gomega.HaveOccurred())
err = wflw.Execute(context.Background())
g.Expect(err).ToNot(gomega.HaveOccurred())
}
func TestWorkflowHooks(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
const taskName workflow.TaskName = "MockTask"
preWorkflowHook := NewMockTask(ctrl)
runPreWorkflowHook := preWorkflowHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
preTaskHook := NewMockTask(ctrl)
runPreTaskHook := preTaskHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
task := NewMockTask(ctrl)
runTask := task.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
postTaskHook := NewMockTask(ctrl)
runPostTaskHook := postTaskHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
postWorkflowHook := NewMockTask(ctrl)
runPostWorkflowHook := postWorkflowHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
gomock.InOrder(
runPreWorkflowHook,
runPreTaskHook,
runTask,
runPostTaskHook,
runPostWorkflowHook,
)
wflw := workflow.New(workflow.Config{})
g.Expect(wflw).ToNot(gomega.BeNil())
err := wflw.AppendTask(taskName, task)
g.Expect(err).ToNot(gomega.HaveOccurred())
wflw.BindPreWorkflowHook(preWorkflowHook)
wflw.BindPostWorkflowHook(postWorkflowHook)
wflw.BindPreTaskHook(taskName, preTaskHook)
wflw.BindPostTaskHook(taskName, postTaskHook)
err = wflw.Execute(context.Background())
g.Expect(err).ToNot(gomega.HaveOccurred())
}
func TestErroneousTask(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
const taskName workflow.TaskName = "MockTask"
expect := errors.New("expected error")
preWorkflowHook := NewMockTask(ctrl)
runPreWorkflowHook := preWorkflowHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
preTaskHook := NewMockTask(ctrl)
runPreTaskHook := preTaskHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
task1 := NewMockTask(ctrl)
runTask1 := task1.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), expect)
postTaskHook := NewMockTask(ctrl)
// Subsequent tasks after error shouldn't run.
task2 := NewMockTask(ctrl)
// These shouldn't run
postWorkflowHook := NewMockTask(ctrl)
gomock.InOrder(runPreWorkflowHook, runPreTaskHook, runTask1)
wflw := workflow.New(workflow.Config{})
g.Expect(wflw).ToNot(gomega.BeNil())
err := wflw.AppendTask(taskName, task1)
g.Expect(err).ToNot(gomega.HaveOccurred())
err = wflw.AppendTask("task2", task2)
g.Expect(err).ToNot(gomega.HaveOccurred())
wflw.BindPreWorkflowHook(preWorkflowHook)
wflw.BindPostWorkflowHook(postWorkflowHook)
wflw.BindPreTaskHook(taskName, preTaskHook)
wflw.BindPostTaskHook(taskName, postTaskHook)
err = wflw.Execute(context.Background())
g.Expect(err).To(gomega.HaveOccurred())
}
func TestErroneousPreWorkflowHook(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
expect := errors.New("expected error")
const taskName workflow.TaskName = "MockTask"
preWorkflowHook := NewMockTask(ctrl)
preWorkflowHook.EXPECT().
RunTask(gomock.Any()).
Return(nil, expect)
preTaskHook := NewMockTask(ctrl)
task := NewMockTask(ctrl)
postTaskHook := NewMockTask(ctrl)
postWorkflowHook := NewMockTask(ctrl)
wflw := workflow.New(workflow.Config{})
g.Expect(wflw).ToNot(gomega.BeNil())
err := wflw.AppendTask(taskName, task)
g.Expect(err).ToNot(gomega.HaveOccurred())
wflw.BindPreWorkflowHook(preWorkflowHook)
wflw.BindPostWorkflowHook(postWorkflowHook)
wflw.BindPreTaskHook(taskName, preTaskHook)
wflw.BindPostTaskHook(taskName, postTaskHook)
err = wflw.Execute(context.Background())
g.Expect(err).To(gomega.HaveOccurred())
}
func TestErroneousPostWorkflowHook(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
expect := errors.New("expected error")
const taskName workflow.TaskName = "MockTask"
preWorkflowHook := NewMockTask(ctrl)
runPreWorkflowHook := preWorkflowHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
preTaskHook := NewMockTask(ctrl)
runPreTaskHook := preTaskHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
task := NewMockTask(ctrl)
runTask := task.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
postTaskHook := NewMockTask(ctrl)
runPostTaskHook := postTaskHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
postWorkflowHook := NewMockTask(ctrl)
runPostWorkflowHook := postWorkflowHook.EXPECT().
RunTask(gomock.Any()).
Return(nil, expect)
gomock.InOrder(
runPreWorkflowHook,
runPreTaskHook,
runTask,
runPostTaskHook,
runPostWorkflowHook,
)
wflw := workflow.New(workflow.Config{})
g.Expect(wflw).ToNot(gomega.BeNil())
err := wflw.AppendTask(taskName, task)
g.Expect(err).ToNot(gomega.HaveOccurred())
wflw.BindPreWorkflowHook(preWorkflowHook)
wflw.BindPostWorkflowHook(postWorkflowHook)
wflw.BindPreTaskHook(taskName, preTaskHook)
wflw.BindPostTaskHook(taskName, postTaskHook)
err = wflw.Execute(context.Background())
g.Expect(err).To(gomega.HaveOccurred())
}
func TestErroneousPreTaskHook(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
expect := errors.New("expected error")
const taskName workflow.TaskName = "MockTask"
// These shouldn't run.
postTaskHook := NewMockTask(ctrl)
postWorkflowHook := NewMockTask(ctrl)
task := NewMockTask(ctrl)
preTaskHook := NewMockTask(ctrl)
runPreTaskHook := preTaskHook.EXPECT().
RunTask(gomock.Any()).
Return(nil, expect)
preWorkflowHook := NewMockTask(ctrl)
runPreWorkflowHook := preWorkflowHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
gomock.InOrder(runPreWorkflowHook, runPreTaskHook)
wflw := workflow.New(workflow.Config{})
g.Expect(wflw).ToNot(gomega.BeNil())
err := wflw.AppendTask(taskName, task)
g.Expect(err).ToNot(gomega.HaveOccurred())
wflw.BindPreWorkflowHook(preWorkflowHook)
wflw.BindPostWorkflowHook(postWorkflowHook)
wflw.BindPreTaskHook(taskName, preTaskHook)
wflw.BindPostTaskHook(taskName, postTaskHook)
err = wflw.Execute(context.Background())
g.Expect(err).To(gomega.HaveOccurred())
}
func TestErroneousPostTaskHook(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
expect := errors.New("expected error")
const taskName workflow.TaskName = "MockTask"
preWorkflowHook := NewMockTask(ctrl)
runPreWorkflowHook := preWorkflowHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
preTaskHook := NewMockTask(ctrl)
runPreTaskHook := preTaskHook.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
task := NewMockTask(ctrl)
runTask := task.EXPECT().
RunTask(gomock.Any()).
Return(context.Background(), nil)
postTaskHook := NewMockTask(ctrl)
runPostTaskHook := postTaskHook.EXPECT().
RunTask(gomock.Any()).
Return(nil, expect)
postWorkflowHook := NewMockTask(ctrl)
gomock.InOrder(
runPreWorkflowHook,
runPreTaskHook,
runTask,
runPostTaskHook,
)
wflw := workflow.New(workflow.Config{})
g.Expect(wflw).ToNot(gomega.BeNil())
err := wflw.AppendTask(taskName, task)
g.Expect(err).ToNot(gomega.HaveOccurred())
wflw.BindPreWorkflowHook(preWorkflowHook)
wflw.BindPostWorkflowHook(postWorkflowHook)
wflw.BindPreTaskHook(taskName, preTaskHook)
wflw.BindPostTaskHook(taskName, postTaskHook)
err = wflw.Execute(context.Background())
g.Expect(err).To(gomega.HaveOccurred())
}
func TestDuplicateTaskNames(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
const taskName workflow.TaskName = "MockTask"
task1 := NewMockTask(ctrl)
task2 := NewMockTask(ctrl)
wflw := workflow.New(workflow.Config{})
g.Expect(wflw).ToNot(gomega.BeNil())
err := wflw.AppendTask(taskName, task1)
g.Expect(err).ToNot(gomega.HaveOccurred())
err = wflw.AppendTask(taskName, task2)
g.Expect(err).To(gomega.HaveOccurred())
}
| 351 |
eks-anywhere | aws | Go | package management
import (
"context"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/workflow"
"github.com/aws/eks-anywhere/pkg/workflow/task/bootstrap"
"github.com/aws/eks-anywhere/pkg/workflow/task/workload"
)
// Define task names for each task run as part of the create cluster workflow. To aid readability,
// the order of task names should be representative of the order of execution.
const (
CreateBootstrapCluster workflow.TaskName = "CreateBootstrapCluster"
CreateWorkloadCluster workflow.TaskName = "CreateWorkloadCluster"
DeleteBootstrapCluster workflow.TaskName = "DeleteBootstrapCluster"
)
// CreateClusterHookRegistrar is a Hook registrar that binds hooks to a create management cluster
// workflow.
type CreateClusterHookRegistrar interface {
RegisterCreateManagementClusterHooks(workflow.HookBinder)
}
// CreateCluster defines the configuration for a management cluster creation workflow.
// It executes tasks in the following order:
// 1. CreateBootstrapCluster
// 2. CreateWorkloadCluster
// 3. DeleteBootstrapCluster
type CreateCluster struct {
// The spec used to construct all other dependencies.
Spec *cluster.Spec
// CreateBootstrapClusterOptions supplies bootstrap cluster options for creating bootstrap clusters.
CreateBootstrapClusterOptions bootstrap.OptionsRetriever
// Bootstrapper creates and destroys bootstrap clusters.
Bootstrapper bootstrap.Bootstrapper
// Cluster represents a logical cluster to be created.
Cluster workload.Cluster
// CNIInstaller installs a CNI in a Kubernetes cluster.
CNIInstaller workload.CNIInstaller
// FS is a file system abstraction used to write files.
FS filewriter.FileWriter
// hookRegistrars are data structures that wish to bind runtime hooks to the workflow.
// They should be added via the WithHookRegistrar method.
hookRegistrars []CreateClusterHookRegistrar
}
// WithHookRegistrar adds a hook registrar to the create cluster workflow builder.
func (c *CreateCluster) WithHookRegistrar(registrar CreateClusterHookRegistrar) *CreateCluster {
c.hookRegistrars = append(c.hookRegistrars, registrar)
return c
}
// Run runs the create cluster workflow.
func (c CreateCluster) Run(ctx context.Context) error {
wflw, err := c.build()
if err != nil {
return err
}
return wflw.Execute(ctx)
}
func (c CreateCluster) build() (*workflow.Workflow, error) {
wflw := workflow.New(workflow.Config{})
for _, r := range c.hookRegistrars {
r.RegisterCreateManagementClusterHooks(wflw)
}
err := wflw.AppendTask(CreateBootstrapCluster, bootstrap.CreateCluster{
Spec: c.Spec,
Options: c.CreateBootstrapClusterOptions,
Bootstrapper: c.Bootstrapper,
})
if err != nil {
return nil, err
}
err = wflw.AppendTask(CreateWorkloadCluster, workload.Create{
Cluster: c.Cluster,
CNI: c.CNIInstaller,
FS: c.FS,
})
if err != nil {
return nil, err
}
err = wflw.AppendTask(DeleteBootstrapCluster, bootstrap.DeleteCluster{
Bootstrapper: c.Bootstrapper,
})
if err != nil {
return nil, err
}
return wflw, nil
}
| 105 |
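A hedged sketch of a CreateClusterHookRegistrar; the registrar type, package name, and log message are invented, and the import path for the management package is assumed from the surrounding import layout. It would be attached to the workflow with CreateCluster's WithHookRegistrar method.

package registrars

import (
	"context"
	"log"

	"github.com/aws/eks-anywhere/pkg/workflow"
	"github.com/aws/eks-anywhere/pkg/workflow/management"
)

// timingRegistrar is a hypothetical registrar that logs just before the
// workload cluster is created.
type timingRegistrar struct{}

// RegisterCreateManagementClusterHooks satisfies management.CreateClusterHookRegistrar.
func (timingRegistrar) RegisterCreateManagementClusterHooks(b workflow.HookBinder) {
	b.BindPreTaskHook(management.CreateWorkloadCluster, workflow.TaskFunc(
		func(ctx context.Context) (context.Context, error) {
			log.Println("creating the workload cluster next")
			return ctx, nil
		},
	))
}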
eks-anywhere | aws | Go | package management
import "github.com/aws/eks-anywhere/pkg/workflow"
const (
PreDeleteClusterTaskName workflow.TaskName = "PreDeleteManagementCluster"
PostDeleteClusterTaskName workflow.TaskName = "PostDeleteManagementCluster"
)
// DeleteClusterHookRegistrar is a Hook registrar that binds hooks to a delete management cluster
// workflow.
type DeleteClusterHookRegistrar interface {
RegisterDeleteManagementClusterHooks(workflow.HookBinder)
}
// DeleteClusterBuilder defines the configuration for a management cluster deletion workflow.
type DeleteClusterBuilder struct {
HookRegistrars []DeleteClusterHookRegistrar
}
// WithHookRegistrar adds a hook registrar to the delete cluster workflow builder.
func (b *DeleteClusterBuilder) WithHookRegistrar(registrar DeleteClusterHookRegistrar) *DeleteClusterBuilder {
b.HookRegistrars = append(b.HookRegistrars, registrar)
return b
}
// Build builds the delete cluster workflow.
func (cfg *DeleteClusterBuilder) Build() (*workflow.Workflow, error) {
wflw := workflow.New(workflow.Config{})
for _, r := range cfg.HookRegistrars {
r.RegisterDeleteManagementClusterHooks(wflw)
}
// Construct and register tasks for a management cluster deletion workflow.
return wflw, nil
}
| 39 |
eks-anywhere | aws | Go | package management
import "github.com/aws/eks-anywhere/pkg/workflow"
const (
PreUpgradeClusterTaskName workflow.TaskName = "PreUpgradeManagementCluster"
PostUpgradeClusterTaskName workflow.TaskName = "PostUpgradeManagementCluster"
)
// UpgradeClusterHookRegistrar is a Hook registrar that binds hooks to an upgrade management
// cluster workflow.
type UpgradeClusterHookRegistrar interface {
RegisterUpgradeManagementClusterHooks(workflow.HookBinder)
}
// UpgradeClusterBuilder defines the configuration for a management cluster upgrade workflow.
type UpgradeClusterBuilder struct {
HookRegistrars []UpgradeClusterHookRegistrar
}
// WithHookRegistrar adds a hook registrar to the upgrade cluster workflow builder.
func (b *UpgradeClusterBuilder) WithHookRegistrar(registrar UpgradeClusterHookRegistrar) *UpgradeClusterBuilder {
b.HookRegistrars = append(b.HookRegistrars, registrar)
return b
}
// Build builds the upgrade cluster workflow.
func (cfg *UpgradeClusterBuilder) Build() (*workflow.Workflow, error) {
wflw := workflow.New(workflow.Config{})
for _, r := range cfg.HookRegistrars {
r.RegisterUpgradeManagementClusterHooks(wflw)
}
// Construct and register tasks for a management cluster upgrade workflow.
return wflw, nil
}
| 39 |
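A small usage sketch of the builder above; the run helper is invented and the management import path is assumed. Note the built workflow only carries registrar-supplied hooks, since Build does not yet append core tasks.

package main

import (
	"context"
	"log"

	"github.com/aws/eks-anywhere/pkg/workflow/management"
)

// run wires registrars into an upgrade workflow and executes it. The
// registrars parameter is whatever providers or features need to hook in.
func run(ctx context.Context, registrars ...management.UpgradeClusterHookRegistrar) error {
	builder := &management.UpgradeClusterBuilder{}
	for _, r := range registrars {
		builder.WithHookRegistrar(r)
	}

	wflw, err := builder.Build()
	if err != nil {
		return err
	}
	return wflw.Execute(ctx)
}

func main() {
	if err := run(context.Background()); err != nil {
		log.Fatal(err)
	}
}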
eks-anywhere | aws | Go | package bootstrap
import (
"context"
"errors"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/workflow/workflowcontext"
)
// OptionsRetriever supplies bootstrap cluster options. This is typically satisfied
// by a provider.
type OptionsRetriever interface {
BootstrapClusterOpts(*cluster.Spec) ([]bootstrapper.BootstrapClusterOption, error)
}
// Bootstrapper creates and destroys bootstrap clusters. It is satisfied by the bootstrap package
// and exists predominantly for testability.
type Bootstrapper interface {
// CreateBootstrapCluster creates a new local cluster. It does not contain any EKS-A components.
CreateBootstrapCluster(
context.Context,
*cluster.Spec,
...bootstrapper.BootstrapClusterOption,
) (*types.Cluster, error)
// DeleteBootstrapCluster deletes a local cluster created with CreateBootstrapCluster.
DeleteBootstrapCluster(
ctx context.Context,
cluster *types.Cluster,
operationType constants.Operation,
isForceCleanup bool,
) error
}
// CreateCluster creates a functional Kubernetes cluster that can be used to facilitate
// EKS-A operations. The bootstrap cluster is populated in the context using
// workflowcontext.WithBootstrapAsManagementCluster for subsequent tasks.
type CreateCluster struct {
// Spec is the spec to be used for bootstrapping the cluster.
Spec *cluster.Spec
// Options supplies bootstrap cluster creation options.
Options OptionsRetriever
// Bootstrapper is used to create the cluster.
Bootstrapper Bootstrapper
}
// RunTask satisfies workflow.Task.
func (t CreateCluster) RunTask(ctx context.Context) (context.Context, error) {
opts, err := t.Options.BootstrapClusterOpts(t.Spec)
if err != nil {
return ctx, err
}
cluster, err := t.Bootstrapper.CreateBootstrapCluster(ctx, t.Spec, opts...)
if err != nil {
return ctx, err
}
return workflowcontext.WithBootstrapAsManagementCluster(ctx, cluster), nil
}
// DeleteCluster deletes a bootstrap cluster. It expects the bootstrap cluster to be
// populated in the context using workflowcontext.WithBootstrapCluster.
type DeleteCluster struct {
// Bootstrapper is used to delete the cluster.
Bootstrapper Bootstrapper
}
// RunTask satisfies workflow.Task.
func (t DeleteCluster) RunTask(ctx context.Context) (context.Context, error) {
cluster := workflowcontext.BootstrapCluster(ctx)
if cluster == nil {
return ctx, errors.New("bootstrap cluster not found in context")
}
if err := t.Bootstrapper.DeleteBootstrapCluster(ctx, cluster, constants.Create, false); err != nil {
return ctx, err
}
return ctx, nil
}
| 88 |
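A minimal sketch of an OptionsRetriever implementation; real providers return provider-specific options, so the fixed slice here is purely illustrative and the type and package names are invented.

package retriever

import (
	"github.com/aws/eks-anywhere/pkg/bootstrapper"
	"github.com/aws/eks-anywhere/pkg/cluster"
)

// staticOptions is a hypothetical OptionsRetriever that returns a fixed set of
// bootstrap cluster options regardless of the cluster spec.
type staticOptions struct {
	opts []bootstrapper.BootstrapClusterOption
}

// BootstrapClusterOpts satisfies bootstrap.OptionsRetriever.
func (s staticOptions) BootstrapClusterOpts(_ *cluster.Spec) ([]bootstrapper.BootstrapClusterOption, error) {
	return s.opts, nil
}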
eks-anywhere | aws | Go | package workload
import (
"context"
"fmt"
"io"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/workflow/workflowcontext"
)
// Cluster represents a workload cluster to be created.
type Cluster interface {
// CreateAsync performs the necessary action that will eventually result in a cluster being
// created. This likely includes applying CAPI manifests to the cluster, but it's not the only
// thing that may be required.
CreateAsync(_ context.Context, management *types.Cluster) error
// WriteKubeconfig writes the kubeconfig for this cluster to the io.Writer.
WriteKubeconfig(_ context.Context, _ io.Writer, management *types.Cluster) error
// WaitUntilControlPlaneAvailable blocks until the first control plane node is ready.
// The node is ready when it's possible to interact with the Kube API server using
// a kubeconfig.
WaitUntilControlPlaneAvailable(_ context.Context, management *types.Cluster) error
// WaitUntilReady blocks until all nodes within the cluster are ready. Nodes are ready when
// they have joined the cluster and their Ready condition is true.
WaitUntilReady(_ context.Context, management *types.Cluster) error
// GetName retrieves the cluster name.
GetName() string
}
// CNIInstaller installs a CNI in a given cluster.
type CNIInstaller interface {
// Install configures a CNI for the first time in a Kubernetes cluster.
Install(ctx context.Context, cluster *types.Cluster) error
}
// Create creates a Kubernetes conformant cluster that is immediately usable for simple workloads.
// It expects a management cluster configuration to be available in the context.
type Create struct {
// Cluster is an abstraction of a cluster that can be created.
Cluster Cluster
// CNI is an installer of a CNI. As per Kubernetes documentation, the CNI must be installed for
// in-cluster communication.
CNI CNIInstaller
// FS is a file system abstraction providing file creation and write capabilities.
FS filewriter.FileWriter
}
// RunTask satisfies workflow.Task.
func (t Create) RunTask(ctx context.Context) (context.Context, error) {
management := workflowcontext.ManagementCluster(ctx)
if management == nil {
return nil, fmt.Errorf("no management cluster in context")
}
// Initiate the cluster creation process. This can take some time, hence it's an asynchronous
// operation that we interrogate for progress as needed.
if err := t.Cluster.CreateAsync(ctx, management); err != nil {
return nil, err
}
// Wait for the first control plane to be ready. Once we have the first control plane we
// assume we can write a Kubeconfig and install the CNI.
//
// Note we think this is important as the CNI is required for MachineHealthChecks to work.
if err := t.Cluster.WaitUntilControlPlaneAvailable(ctx, management); err != nil {
return nil, err
}
fh, path, err := t.FS.Create(
kubeconfig.FormatWorkloadClusterKubeconfigFilename(t.Cluster.GetName()),
filewriter.PersistentFile,
filewriter.Permission0600,
)
if err != nil {
return nil, err
}
if err := t.Cluster.WriteKubeconfig(ctx, fh, management); err != nil {
return nil, err
}
workloadCluster := &types.Cluster{
Name: t.Cluster.GetName(),
KubeconfigFile: path,
}
ctx = workflowcontext.WithWorkloadCluster(ctx, workloadCluster)
if err := t.CNI.Install(ctx, workloadCluster); err != nil {
return nil, fmt.Errorf("installing CNI in workload cluster: %v", err)
}
// Ensure we block until the cluster is completely up. This is important as the Create task
// should result in a usable cluster with all specified nodes ready.
if err := t.Cluster.WaitUntilReady(ctx, management); err != nil {
return nil, err
}
return ctx, nil
}
| 108 |
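The Create task above only needs something satisfying CNIInstaller; below is a hedged stub (type and package names invented) that shows the required shape, with the real installation replaced by a log line.

package cni

import (
	"context"
	"log"

	"github.com/aws/eks-anywhere/pkg/types"
)

// noopInstaller is a stand-in CNI installer useful for wiring or testing the
// workload.Create task; a real implementation would apply CNI manifests.
type noopInstaller struct{}

// Install satisfies workload.CNIInstaller.
func (noopInstaller) Install(_ context.Context, cluster *types.Cluster) error {
	log.Printf("pretending to install a CNI in cluster %s", cluster.Name)
	return nil
}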
eks-anywhere | aws | Go | package workflowcontext
import (
"context"
"github.com/aws/eks-anywhere/pkg/types"
)
// bootstrapCluster is the context key used to store and retrieve the bootstrap cluster.
const bootstrapCluster contextKey = "bootstrap-cluster"
// WithBootstrapCluster returns a context based on ctx containing the bootstrap cluster.
func WithBootstrapCluster(ctx context.Context, cluster *types.Cluster) context.Context {
return context.WithValue(ctx, bootstrapCluster, cluster)
}
// BootstrapCluster retrieves the bootstrap cluster configured in ctx or returns a nil pointer.
func BootstrapCluster(ctx context.Context) *types.Cluster {
cluster, _ := ctx.Value(bootstrapCluster).(*types.Cluster)
return cluster
}
const managementCluster contextKey = "management-cluster"
// WithManagementCluster returns a context based on ctx containing a management cluster.
func WithManagementCluster(ctx context.Context, cluster *types.Cluster) context.Context {
return context.WithValue(ctx, managementCluster, cluster)
}
// ManagementCluster retrieves the management cluster configured in ctx or returns a nil pointer.
func ManagementCluster(ctx context.Context) *types.Cluster {
cluster, _ := ctx.Value(managementCluster).(*types.Cluster)
return cluster
}
// workloadCluster is the context key used to store and retrieve the workload cluster.
const workloadCluster contextKey = "workload-cluster"
// WithWorkloadCluster returns a context based on ctx containing the workload cluster.
func WithWorkloadCluster(ctx context.Context, cluster *types.Cluster) context.Context {
return context.WithValue(ctx, workloadCluster, cluster)
}
// WorkloadCluster retrieves the workload cluster configured in ctx or returns a nil pointer.
func WorkloadCluster(ctx context.Context) *types.Cluster {
cluster, _ := ctx.Value(workloadCluster).(*types.Cluster)
return cluster
}
// WithBootstrapAsManagementCluster is shorthand for WithBootstrapCluster followed by
// WithManagementCluster.
func WithBootstrapAsManagementCluster(ctx context.Context, cluster *types.Cluster) context.Context {
ctx = WithBootstrapCluster(ctx, cluster)
return WithManagementCluster(ctx, cluster)
}
| 53 |
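A short, self-contained sketch of the context round trip above: store a bootstrap cluster with WithBootstrapAsManagementCluster and read it back through both accessors; the cluster values are fabricated for the example.

package main

import (
	"context"
	"fmt"

	"github.com/aws/eks-anywhere/pkg/types"
	"github.com/aws/eks-anywhere/pkg/workflow/workflowcontext"
)

func main() {
	bootstrap := &types.Cluster{Name: "bootstrap", KubeconfigFile: "bootstrap.kubeconfig"}

	// During management workflows the bootstrap cluster doubles as the
	// management cluster, so both accessors return the same value.
	ctx := workflowcontext.WithBootstrapAsManagementCluster(context.Background(), bootstrap)

	fmt.Println(workflowcontext.BootstrapCluster(ctx).Name)
	fmt.Println(workflowcontext.ManagementCluster(ctx).Name)
}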
eks-anywhere | aws | Go | /*
Package workflowcontext contains utility functions for populating workflow context specific data
in a context.Context.
Data appropriate for the context includes anything that cannot be determined at the time of
object construction. For example, a bootstrap cluster does not yet exist when a management
workflow is constructed, so a kubeconfig isn't available to communicate with the cluster and
must instead be passed as contextual data once the cluster is created.
*/
package workflowcontext
| 11 |
eks-anywhere | aws | Go | package workflowcontext
// contextKey is used to create collisionless context keys.
type contextKey string
func (c contextKey) String() string {
return string(c)
}
| 9 |
eks-anywhere | aws | Go | package workload
| 2 |
eks-anywhere | aws | Go | package workload
| 2 |
eks-anywhere | aws | Go | package workload
| 2 |
eks-anywhere | aws | Go | package workload
| 2 |
eks-anywhere | aws | Go | package workflows
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clustermarshaller"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/task"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/workflows/interfaces"
)
type Create struct {
bootstrapper interfaces.Bootstrapper
provider providers.Provider
clusterManager interfaces.ClusterManager
gitOpsManager interfaces.GitOpsManager
writer filewriter.FileWriter
eksdInstaller interfaces.EksdInstaller
packageInstaller interfaces.PackageInstaller
}
func NewCreate(bootstrapper interfaces.Bootstrapper, provider providers.Provider,
clusterManager interfaces.ClusterManager, gitOpsManager interfaces.GitOpsManager,
writer filewriter.FileWriter, eksdInstaller interfaces.EksdInstaller,
packageInstaller interfaces.PackageInstaller,
) *Create {
return &Create{
bootstrapper: bootstrapper,
provider: provider,
clusterManager: clusterManager,
gitOpsManager: gitOpsManager,
writer: writer,
eksdInstaller: eksdInstaller,
packageInstaller: packageInstaller,
}
}
func (c *Create) Run(ctx context.Context, clusterSpec *cluster.Spec, validator interfaces.Validator, forceCleanup bool) error {
if forceCleanup {
if err := c.bootstrapper.DeleteBootstrapCluster(ctx, &types.Cluster{
Name: clusterSpec.Cluster.Name,
}, constants.Create, forceCleanup); err != nil {
return err
}
}
commandContext := &task.CommandContext{
Bootstrapper: c.bootstrapper,
Provider: c.provider,
ClusterManager: c.clusterManager,
GitOpsManager: c.gitOpsManager,
ClusterSpec: clusterSpec,
Writer: c.writer,
Validations: validator,
EksdInstaller: c.eksdInstaller,
PackageInstaller: c.packageInstaller,
}
if clusterSpec.ManagementCluster != nil {
commandContext.BootstrapCluster = clusterSpec.ManagementCluster
}
err := task.NewTaskRunner(&SetAndValidateTask{}, c.writer).RunTask(ctx, commandContext)
return err
}
// task related entities
type CreateBootStrapClusterTask struct{}
type SetAndValidateTask struct{}
type CreateWorkloadClusterTask struct{}
type InstallResourcesOnManagementTask struct{}
type InstallEksaComponentsTask struct{}
type InstallGitOpsManagerTask struct{}
type MoveClusterManagementTask struct{}
type WriteClusterConfigTask struct{}
type DeleteBootstrapClusterTask struct {
*CollectDiagnosticsTask
}
type InstallCuratedPackagesTask struct{}
// CreateBootStrapClusterTask implementation
func (s *CreateBootStrapClusterTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
if commandContext.BootstrapCluster != nil {
if commandContext.ClusterSpec.AWSIamConfig != nil {
logger.Info("Creating aws-iam-authenticator certificate and key pair secret on bootstrap cluster")
if err := commandContext.ClusterManager.CreateAwsIamAuthCaSecret(ctx, commandContext.BootstrapCluster, commandContext.ClusterSpec.Cluster.Name); err != nil {
commandContext.SetError(err)
return &CollectMgmtClusterDiagnosticsTask{}
}
}
return &CreateWorkloadClusterTask{}
}
logger.Info("Creating new bootstrap cluster")
bootstrapOptions, err := commandContext.Provider.BootstrapClusterOpts(commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return nil
}
bootstrapCluster, err := commandContext.Bootstrapper.CreateBootstrapCluster(ctx, commandContext.ClusterSpec, bootstrapOptions...)
if err != nil {
commandContext.SetError(err)
return nil
}
commandContext.BootstrapCluster = bootstrapCluster
logger.Info("Provider specific pre-capi-install-setup on bootstrap cluster")
if err = commandContext.Provider.PreCAPIInstallOnBootstrap(ctx, bootstrapCluster, commandContext.ClusterSpec); err != nil {
commandContext.SetError(err)
return &CollectMgmtClusterDiagnosticsTask{}
}
logger.Info("Installing cluster-api providers on bootstrap cluster")
if err = commandContext.ClusterManager.InstallCAPI(ctx, commandContext.ClusterSpec, bootstrapCluster, commandContext.Provider); err != nil {
commandContext.SetError(err)
return &CollectMgmtClusterDiagnosticsTask{}
}
if commandContext.ClusterSpec.AWSIamConfig != nil {
logger.Info("Creating aws-iam-authenticator certificate and key pair secret on bootstrap cluster")
if err = commandContext.ClusterManager.CreateAwsIamAuthCaSecret(ctx, bootstrapCluster, commandContext.ClusterSpec.Cluster.Name); err != nil {
commandContext.SetError(err)
return &CollectMgmtClusterDiagnosticsTask{}
}
}
logger.Info("Provider specific post-setup")
if err = commandContext.Provider.PostBootstrapSetup(ctx, commandContext.ClusterSpec.Cluster, bootstrapCluster); err != nil {
commandContext.SetError(err)
return &CollectMgmtClusterDiagnosticsTask{}
}
return &CreateWorkloadClusterTask{}
}
func (s *CreateBootStrapClusterTask) Name() string {
return "bootstrap-cluster-init"
}
func (s *CreateBootStrapClusterTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *CreateBootStrapClusterTask) Checkpoint() *task.CompletedTask {
return nil
}
// SetAndValidateTask implementation
func (s *SetAndValidateTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Performing setup and validations")
runner := validations.NewRunner()
runner.Register(s.providerValidation(ctx, commandContext)...)
runner.Register(commandContext.GitOpsManager.Validations(ctx, commandContext.ClusterSpec)...)
runner.Register(commandContext.Validations.PreflightValidations(ctx)...)
err := runner.Run()
if err != nil {
commandContext.SetError(err)
return nil
}
return &CreateBootStrapClusterTask{}
}
func (s *SetAndValidateTask) providerValidation(ctx context.Context, commandContext *task.CommandContext) []validations.Validation {
return []validations.Validation{
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: fmt.Sprintf("%s Provider setup is valid", commandContext.Provider.Name()),
Err: commandContext.Provider.SetupAndValidateCreateCluster(ctx, commandContext.ClusterSpec),
}
},
}
}
func (s *SetAndValidateTask) Name() string {
return "setup-validate"
}
func (s *SetAndValidateTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *SetAndValidateTask) Checkpoint() *task.CompletedTask {
return nil
}
// CreateWorkloadClusterTask implementation
func (s *CreateWorkloadClusterTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Creating new workload cluster")
workloadCluster, err := commandContext.ClusterManager.CreateWorkloadCluster(ctx, commandContext.BootstrapCluster, commandContext.ClusterSpec, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
commandContext.WorkloadCluster = workloadCluster
logger.Info("Installing networking on workload cluster")
err = commandContext.ClusterManager.InstallNetworking(ctx, workloadCluster, commandContext.ClusterSpec, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.V(4).Info("Installing machine health checks on bootstrap cluster")
err = commandContext.ClusterManager.InstallMachineHealthChecks(ctx, commandContext.ClusterSpec, commandContext.BootstrapCluster)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
if err = commandContext.ClusterManager.RunPostCreateWorkloadCluster(ctx, commandContext.BootstrapCluster, commandContext.WorkloadCluster, commandContext.ClusterSpec); err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
if commandContext.ClusterSpec.AWSIamConfig != nil {
logger.Info("Installing aws-iam-authenticator on workload cluster")
err = commandContext.ClusterManager.InstallAwsIamAuth(ctx, commandContext.BootstrapCluster, workloadCluster, commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
}
if !commandContext.BootstrapCluster.ExistingManagement {
logger.Info("Creating EKS-A namespace")
err = commandContext.ClusterManager.CreateEKSANamespace(ctx, workloadCluster)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.Info("Installing cluster-api providers on workload cluster")
err = commandContext.ClusterManager.InstallCAPI(ctx, commandContext.ClusterSpec, commandContext.WorkloadCluster, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.Info("Installing EKS-A secrets on workload cluster")
err := commandContext.Provider.UpdateSecrets(ctx, commandContext.WorkloadCluster, commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
}
return &InstallResourcesOnManagementTask{}
}
func (s *CreateWorkloadClusterTask) Name() string {
return "workload-cluster-init"
}
func (s *CreateWorkloadClusterTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *CreateWorkloadClusterTask) Checkpoint() *task.CompletedTask {
return nil
}
// InstallResourcesOnManagementTask implementation.
func (s *InstallResourcesOnManagementTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
if commandContext.BootstrapCluster.ExistingManagement {
return &MoveClusterManagementTask{}
}
logger.Info("Installing resources on management cluster")
if err := commandContext.Provider.PostWorkloadInit(ctx, commandContext.WorkloadCluster, commandContext.ClusterSpec); err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
return &MoveClusterManagementTask{}
}
func (s *InstallResourcesOnManagementTask) Name() string {
return "install-resources-on-management-cluster"
}
func (s *InstallResourcesOnManagementTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *InstallResourcesOnManagementTask) Checkpoint() *task.CompletedTask {
return nil
}
// MoveClusterManagementTask implementation
func (s *MoveClusterManagementTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
if commandContext.BootstrapCluster.ExistingManagement {
return &InstallEksaComponentsTask{}
}
logger.Info("Moving cluster management from bootstrap to workload cluster")
err := commandContext.ClusterManager.MoveCAPI(ctx, commandContext.BootstrapCluster, commandContext.WorkloadCluster, commandContext.WorkloadCluster.Name, commandContext.ClusterSpec, types.WithNodeRef())
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
return &InstallEksaComponentsTask{}
}
func (s *MoveClusterManagementTask) Name() string {
return "capi-management-move"
}
func (s *MoveClusterManagementTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *MoveClusterManagementTask) Checkpoint() *task.CompletedTask {
return nil
}
// InstallEksaComponentsTask implementation
func (s *InstallEksaComponentsTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
if !commandContext.BootstrapCluster.ExistingManagement {
logger.Info("Installing EKS-A custom components (CRD and controller) on workload cluster")
err := commandContext.ClusterManager.InstallCustomComponents(ctx, commandContext.ClusterSpec, commandContext.WorkloadCluster, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.Info("Installing EKS-D components on workload cluster")
err = commandContext.EksdInstaller.InstallEksdCRDs(ctx, commandContext.ClusterSpec, commandContext.WorkloadCluster)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
}
logger.Info("Creating EKS-A CRDs instances on workload cluster")
datacenterConfig := commandContext.Provider.DatacenterConfig(commandContext.ClusterSpec)
machineConfigs := commandContext.Provider.MachineConfigs(commandContext.ClusterSpec)
targetCluster := commandContext.WorkloadCluster
if commandContext.BootstrapCluster.ExistingManagement {
targetCluster = commandContext.BootstrapCluster
}
err := commandContext.ClusterManager.CreateEKSAResources(ctx, targetCluster, commandContext.ClusterSpec, datacenterConfig, machineConfigs)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
err = commandContext.EksdInstaller.InstallEksdManifest(ctx, commandContext.ClusterSpec, targetCluster)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
err = commandContext.ClusterManager.ResumeEKSAControllerReconcile(ctx, targetCluster, commandContext.ClusterSpec, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
return &InstallGitOpsManagerTask{}
}
func (s *InstallEksaComponentsTask) Name() string {
return "eksa-components-install"
}
func (s *InstallEksaComponentsTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *InstallEksaComponentsTask) Checkpoint() *task.CompletedTask {
return nil
}
// InstallGitOpsManagerTask implementation
func (s *InstallGitOpsManagerTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Installing GitOps Toolkit on workload cluster")
err := commandContext.GitOpsManager.InstallGitOps(ctx, commandContext.WorkloadCluster, commandContext.ClusterSpec, commandContext.Provider.DatacenterConfig(commandContext.ClusterSpec), commandContext.Provider.MachineConfigs(commandContext.ClusterSpec))
if err != nil {
logger.MarkFail("Error when installing GitOps toolkits on workload cluster; EKS-A will continue with cluster creation, but GitOps will not be enabled", "error", err)
return &WriteClusterConfigTask{}
}
return &WriteClusterConfigTask{}
}
func (s *InstallGitOpsManagerTask) Name() string {
return "gitops-manager-install"
}
func (s *InstallGitOpsManagerTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *InstallGitOpsManagerTask) Checkpoint() *task.CompletedTask {
return nil
}
func (s *WriteClusterConfigTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Writing cluster config file")
err := clustermarshaller.WriteClusterConfig(commandContext.ClusterSpec, commandContext.Provider.DatacenterConfig(commandContext.ClusterSpec), commandContext.Provider.MachineConfigs(commandContext.ClusterSpec), commandContext.Writer)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
return &DeleteBootstrapClusterTask{}
}
func (s *WriteClusterConfigTask) Name() string {
return "write-cluster-config"
}
func (s *WriteClusterConfigTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *WriteClusterConfigTask) Checkpoint() *task.CompletedTask {
return nil
}
// DeleteBootstrapClusterTask implementation
func (s *DeleteBootstrapClusterTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
if !commandContext.BootstrapCluster.ExistingManagement {
logger.Info("Deleting bootstrap cluster")
err := commandContext.Bootstrapper.DeleteBootstrapCluster(ctx, commandContext.BootstrapCluster, constants.Create, false)
if err != nil {
commandContext.SetError(err)
}
}
if commandContext.OriginalError == nil {
logger.MarkSuccess("Cluster created!")
}
return &InstallCuratedPackagesTask{}
}
func (s *DeleteBootstrapClusterTask) Name() string {
return "delete-kind-cluster"
}
func (cp *InstallCuratedPackagesTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
commandContext.PackageInstaller.InstallCuratedPackages(ctx)
return nil
}
func (cp *InstallCuratedPackagesTask) Name() string {
return "install-curated-packages"
}
func (s *InstallCuratedPackagesTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *InstallCuratedPackagesTask) Checkpoint() *task.CompletedTask {
return nil
}
| 478 |
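The file above follows a task-chain style where each task's Run returns the next task and nil ends the chain; the sketch below (package and type invented) mirrors that method set with a no-op task, assuming only the signatures visible in this file.

package tasksketch

import (
	"context"

	"github.com/aws/eks-anywhere/pkg/logger"
	"github.com/aws/eks-anywhere/pkg/task"
)

// noopTask mirrors the method set of the tasks above: Run returns the next
// task in the chain, and returning nil ends the workflow.
type noopTask struct{}

func (noopTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
	logger.Info("nothing to do")
	return nil
}

func (noopTask) Name() string {
	return "noop"
}

func (noopTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
	return nil, nil
}

func (noopTask) Checkpoint() *task.CompletedTask {
	return nil
}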
eks-anywhere | aws | Go | package workflows_test
import (
"context"
"errors"
"fmt"
"testing"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/cluster"
writermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
"github.com/aws/eks-anywhere/pkg/providers"
providermocks "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/task"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/workflows"
"github.com/aws/eks-anywhere/pkg/workflows/interfaces/mocks"
)
type createTestSetup struct {
t *testing.T
packageInstaller *mocks.MockPackageInstaller
bootstrapper *mocks.MockBootstrapper
clusterManager *mocks.MockClusterManager
gitOpsManager *mocks.MockGitOpsManager
provider *providermocks.MockProvider
writer *writermocks.MockFileWriter
validator *mocks.MockValidator
eksd *mocks.MockEksdInstaller
datacenterConfig providers.DatacenterConfig
machineConfigs []providers.MachineConfig
workflow *workflows.Create
ctx context.Context
clusterSpec *cluster.Spec
forceCleanup bool
bootstrapCluster *types.Cluster
workloadCluster *types.Cluster
}
func newCreateTest(t *testing.T) *createTestSetup {
mockCtrl := gomock.NewController(t)
bootstrapper := mocks.NewMockBootstrapper(mockCtrl)
clusterManager := mocks.NewMockClusterManager(mockCtrl)
gitOpsManager := mocks.NewMockGitOpsManager(mockCtrl)
provider := providermocks.NewMockProvider(mockCtrl)
writer := writermocks.NewMockFileWriter(mockCtrl)
eksd := mocks.NewMockEksdInstaller(mockCtrl)
packageInstaller := mocks.NewMockPackageInstaller(mockCtrl)
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{}
machineConfigs := []providers.MachineConfig{&v1alpha1.VSphereMachineConfig{}}
workflow := workflows.NewCreate(bootstrapper, provider, clusterManager, gitOpsManager, writer, eksd, packageInstaller)
validator := mocks.NewMockValidator(mockCtrl)
return &createTestSetup{
t: t,
bootstrapper: bootstrapper,
clusterManager: clusterManager,
gitOpsManager: gitOpsManager,
provider: provider,
writer: writer,
validator: validator,
eksd: eksd,
packageInstaller: packageInstaller,
datacenterConfig: datacenterConfig,
machineConfigs: machineConfigs,
workflow: workflow,
ctx: context.Background(),
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "cluster-name"; s.Cluster.Annotations = map[string]string{} }),
bootstrapCluster: &types.Cluster{Name: "bootstrap"},
workloadCluster: &types.Cluster{Name: "workload"},
}
}
func (c *createTestSetup) expectSetup() {
c.provider.EXPECT().SetupAndValidateCreateCluster(c.ctx, c.clusterSpec)
c.provider.EXPECT().Name()
c.gitOpsManager.EXPECT().Validations(c.ctx, c.clusterSpec)
}
func (c *createTestSetup) expectCreateBootstrap() {
opts := []bootstrapper.BootstrapClusterOption{bootstrapper.WithExtraDockerMounts()}
gomock.InOrder(
c.provider.EXPECT().BootstrapClusterOpts(c.clusterSpec).Return(opts, nil),
// Checking for not nil because in Go you can't compare closures
c.bootstrapper.EXPECT().CreateBootstrapCluster(
c.ctx, c.clusterSpec, gomock.Not(gomock.Nil()),
).Return(c.bootstrapCluster, nil),
c.provider.EXPECT().PreCAPIInstallOnBootstrap(c.ctx, c.bootstrapCluster, c.clusterSpec),
c.clusterManager.EXPECT().InstallCAPI(c.ctx, c.clusterSpec, c.bootstrapCluster, c.provider),
c.provider.EXPECT().PostBootstrapSetup(c.ctx, c.clusterSpec.Cluster, c.bootstrapCluster),
)
}
func (c *createTestSetup) expectCreateWorkload() {
gomock.InOrder(
c.clusterManager.EXPECT().CreateWorkloadCluster(
c.ctx, c.bootstrapCluster, c.clusterSpec, c.provider,
).Return(c.workloadCluster, nil),
c.clusterManager.EXPECT().InstallNetworking(
c.ctx, c.workloadCluster, c.clusterSpec, c.provider,
),
c.clusterManager.EXPECT().InstallMachineHealthChecks(
c.ctx, c.clusterSpec, c.bootstrapCluster,
),
c.clusterManager.EXPECT().RunPostCreateWorkloadCluster(
c.ctx, c.bootstrapCluster, c.workloadCluster, c.clusterSpec,
),
c.clusterManager.EXPECT().CreateEKSANamespace(
c.ctx, c.workloadCluster,
),
c.clusterManager.EXPECT().InstallCAPI(
c.ctx, c.clusterSpec, c.workloadCluster, c.provider,
),
c.provider.EXPECT().UpdateSecrets(c.ctx, c.workloadCluster, c.clusterSpec),
)
}
func (c *createTestSetup) expectInstallResourcesOnManagementTask() {
gomock.InOrder(
c.provider.EXPECT().PostWorkloadInit(c.ctx, c.workloadCluster, c.clusterSpec),
)
}
func (c *createTestSetup) expectCreateWorkloadSkipCAPI() {
gomock.InOrder(
c.clusterManager.EXPECT().CreateWorkloadCluster(
c.ctx, c.bootstrapCluster, c.clusterSpec, c.provider,
).Return(c.workloadCluster, nil),
c.clusterManager.EXPECT().InstallNetworking(
c.ctx, c.workloadCluster, c.clusterSpec, c.provider,
),
c.clusterManager.EXPECT().InstallMachineHealthChecks(
c.ctx, c.clusterSpec, c.bootstrapCluster,
),
c.clusterManager.EXPECT().RunPostCreateWorkloadCluster(
c.ctx, c.bootstrapCluster, c.workloadCluster, c.clusterSpec,
),
)
c.clusterManager.EXPECT().InstallCAPI(
c.ctx, c.clusterSpec, c.workloadCluster, c.provider,
).Times(0)
c.provider.EXPECT().UpdateSecrets(c.ctx, c.workloadCluster, c.clusterSpec).Times(0)
}
func (c *createTestSetup) expectMoveManagement() {
c.clusterManager.EXPECT().MoveCAPI(
c.ctx, c.bootstrapCluster, c.workloadCluster, c.workloadCluster.Name, c.clusterSpec, gomock.Any(),
)
}
func (c *createTestSetup) skipMoveManagement() {
c.clusterManager.EXPECT().MoveCAPI(
c.ctx, c.bootstrapCluster, c.workloadCluster, gomock.Any(), c.clusterSpec,
).Times(0)
}
func (c *createTestSetup) expectInstallEksaComponents() {
gomock.InOrder(
c.clusterManager.EXPECT().InstallCustomComponents(
c.ctx, c.clusterSpec, c.workloadCluster, c.provider),
c.eksd.EXPECT().InstallEksdCRDs(c.ctx, c.clusterSpec, c.workloadCluster),
c.provider.EXPECT().DatacenterConfig(c.clusterSpec).Return(c.datacenterConfig),
c.provider.EXPECT().MachineConfigs(c.clusterSpec).Return(c.machineConfigs),
c.clusterManager.EXPECT().CreateEKSAResources(
c.ctx, c.workloadCluster, c.clusterSpec, c.datacenterConfig, c.machineConfigs,
),
c.eksd.EXPECT().InstallEksdManifest(
c.ctx, c.clusterSpec, c.workloadCluster),
c.clusterManager.EXPECT().ResumeEKSAControllerReconcile(c.ctx, c.workloadCluster, c.clusterSpec, c.provider),
)
}
func (c *createTestSetup) skipInstallEksaComponents() {
gomock.InOrder(
c.clusterManager.EXPECT().InstallCustomComponents(
c.ctx, c.clusterSpec, c.workloadCluster, c.provider).Times(0),
c.eksd.EXPECT().InstallEksdCRDs(c.ctx, c.clusterSpec, c.workloadCluster).Times(0),
c.provider.EXPECT().DatacenterConfig(c.clusterSpec).Return(c.datacenterConfig),
c.provider.EXPECT().MachineConfigs(c.clusterSpec).Return(c.machineConfigs),
c.clusterManager.EXPECT().CreateEKSAResources(
c.ctx, c.bootstrapCluster, c.clusterSpec, c.datacenterConfig, c.machineConfigs,
),
c.eksd.EXPECT().InstallEksdManifest(
c.ctx, c.clusterSpec, c.bootstrapCluster),
c.clusterManager.EXPECT().ResumeEKSAControllerReconcile(c.ctx, c.bootstrapCluster, c.clusterSpec, c.provider),
)
}
func (c *createTestSetup) expectCuratedPackagesInstallation() {
c.packageInstaller.EXPECT().InstallCuratedPackages(c.ctx).Times(1)
}
func (c *createTestSetup) expectInstallGitOpsManager() {
gomock.InOrder(
c.provider.EXPECT().DatacenterConfig(c.clusterSpec).Return(c.datacenterConfig),
c.provider.EXPECT().MachineConfigs(c.clusterSpec).Return(c.machineConfigs),
c.gitOpsManager.EXPECT().InstallGitOps(
c.ctx, c.workloadCluster, c.clusterSpec, c.datacenterConfig, c.machineConfigs),
)
}
func (c *createTestSetup) expectWriteClusterConfig() {
gomock.InOrder(
c.provider.EXPECT().DatacenterConfig(c.clusterSpec).Return(c.datacenterConfig),
c.provider.EXPECT().MachineConfigs(c.clusterSpec).Return(c.machineConfigs),
c.writer.EXPECT().Write("cluster-name-eks-a-cluster.yaml", gomock.Any(), gomock.Any()),
)
}
func (c *createTestSetup) expectDeleteBootstrap() {
c.bootstrapper.EXPECT().DeleteBootstrapCluster(c.ctx, c.bootstrapCluster, gomock.Any(), gomock.Any())
}
func (c *createTestSetup) expectNotDeleteBootstrap() {
c.bootstrapper.EXPECT().DeleteBootstrapCluster(c.ctx, c.bootstrapCluster, gomock.Any(), gomock.Any()).Times(0)
}
func (c *createTestSetup) expectInstallMHC() {
c.clusterManager.EXPECT().InstallMachineHealthChecks(
c.ctx, c.clusterSpec, c.bootstrapCluster,
)
}
func (c *createTestSetup) run() error {
return c.workflow.Run(c.ctx, c.clusterSpec, c.validator, c.forceCleanup)
}
func (c *createTestSetup) expectPreflightValidationsToPass() {
c.validator.EXPECT().PreflightValidations(c.ctx).Return(nil)
}
func TestCreateRunSuccess(t *testing.T) {
test := newCreateTest(t)
test.expectSetup()
test.expectCreateBootstrap()
test.expectCreateWorkload()
test.expectInstallResourcesOnManagementTask()
test.expectMoveManagement()
test.expectInstallEksaComponents()
test.expectInstallGitOpsManager()
test.expectWriteClusterConfig()
test.expectDeleteBootstrap()
test.expectPreflightValidationsToPass()
test.expectCuratedPackagesInstallation()
err := test.run()
if err != nil {
t.Fatalf("Create.Run() err = %v, want err = nil", err)
}
}
func TestCreateRunAWSIamConfigFail(t *testing.T) {
wantError := errors.New("test error")
test := newCreateTest(t)
// Adding AWSIAMConfig to cluster spec.
test.clusterSpec.AWSIamConfig = &v1alpha1.AWSIamConfig{}
test.expectSetup()
test.expectPreflightValidationsToPass()
test.provider.EXPECT().BootstrapClusterOpts(test.clusterSpec).Return([]bootstrapper.BootstrapClusterOption{bootstrapper.WithExtraDockerMounts()}, nil)
test.bootstrapper.EXPECT().CreateBootstrapCluster(test.ctx, test.clusterSpec, gomock.Not(gomock.Nil())).Return(test.bootstrapCluster, nil)
test.provider.EXPECT().PreCAPIInstallOnBootstrap(test.ctx, test.bootstrapCluster, test.clusterSpec)
test.clusterManager.EXPECT().InstallCAPI(test.ctx, test.clusterSpec, test.bootstrapCluster, test.provider)
test.clusterManager.EXPECT().CreateAwsIamAuthCaSecret(test.ctx, test.bootstrapCluster, test.clusterSpec.Cluster.Name).Return(wantError)
test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster)
test.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", test.clusterSpec.Cluster.Name), gomock.Any())
if err := test.run(); err == nil {
t.Fatalf("Create.Run() err = %v, want err = %v", err, wantError)
}
}
func TestCreateRunAWSIamConfigSuccess(t *testing.T) {
test := newCreateTest(t)
// Adding AWSIAMConfig to cluster spec.
test.clusterSpec.AWSIamConfig = &v1alpha1.AWSIamConfig{}
test.clusterManager.EXPECT().CreateAwsIamAuthCaSecret(test.ctx, test.bootstrapCluster, test.clusterSpec.Cluster.Name)
test.clusterManager.EXPECT().InstallAwsIamAuth(test.ctx, test.bootstrapCluster, test.workloadCluster, test.clusterSpec)
test.expectSetup()
test.expectCreateBootstrap()
test.expectCreateWorkload()
test.expectInstallResourcesOnManagementTask()
test.expectMoveManagement()
test.expectInstallEksaComponents()
test.expectInstallGitOpsManager()
test.expectWriteClusterConfig()
test.expectDeleteBootstrap()
// test.expectInstallMHC()
test.expectPreflightValidationsToPass()
test.expectCuratedPackagesInstallation()
err := test.run()
if err != nil {
t.Fatalf("Create.Run() err = %v, want err = nil", err)
}
}
func TestCreateRunSuccessForceCleanup(t *testing.T) {
test := newCreateTest(t)
test.forceCleanup = true
test.bootstrapper.EXPECT().DeleteBootstrapCluster(test.ctx, &types.Cluster{Name: "cluster-name"}, gomock.Any(), gomock.Any())
test.expectSetup()
test.expectCreateBootstrap()
test.expectCreateWorkload()
test.expectInstallResourcesOnManagementTask()
test.expectMoveManagement()
test.expectInstallEksaComponents()
test.expectInstallGitOpsManager()
test.expectWriteClusterConfig()
test.expectDeleteBootstrap()
test.expectPreflightValidationsToPass()
test.expectCuratedPackagesInstallation()
err := test.run()
if err != nil {
t.Fatalf("Create.Run() err = %v, want err = nil", err)
}
}
func TestCreateWorkloadClusterRunSuccess(t *testing.T) {
managementKubeconfig := "test.kubeconfig"
test := newCreateTest(t)
test.bootstrapCluster.ExistingManagement = true
test.bootstrapCluster.KubeconfigFile = managementKubeconfig
test.bootstrapCluster.Name = "cluster-name"
test.clusterSpec.ManagementCluster = &types.Cluster{
Name: test.bootstrapCluster.Name,
KubeconfigFile: managementKubeconfig,
ExistingManagement: true,
}
test.expectSetup()
test.expectCreateWorkloadSkipCAPI()
test.skipMoveManagement()
test.skipInstallEksaComponents()
test.expectInstallGitOpsManager()
test.expectWriteClusterConfig()
test.expectNotDeleteBootstrap()
// test.expectInstallMHC()
test.expectPreflightValidationsToPass()
test.expectCuratedPackagesInstallation()
if err := test.run(); err != nil {
t.Fatalf("Create.Run() err = %v, want err = nil", err)
}
}
func TestCreateWorkloadClusterRunAWSIamConfigSuccess(t *testing.T) {
managementKubeconfig := "test.kubeconfig"
test := newCreateTest(t)
test.bootstrapCluster.ExistingManagement = true
test.bootstrapCluster.KubeconfigFile = managementKubeconfig
test.bootstrapCluster.Name = "cluster-name"
test.clusterSpec.ManagementCluster = &types.Cluster{
Name: test.bootstrapCluster.Name,
KubeconfigFile: managementKubeconfig,
ExistingManagement: true,
}
// Adding AWSIAMConfig to cluster spec.
test.clusterSpec.AWSIamConfig = &v1alpha1.AWSIamConfig{}
test.clusterManager.EXPECT().CreateAwsIamAuthCaSecret(test.ctx, test.bootstrapCluster, test.clusterSpec.Cluster.Name)
test.clusterManager.EXPECT().InstallAwsIamAuth(test.ctx, test.bootstrapCluster, test.workloadCluster, test.clusterSpec)
test.expectSetup()
test.expectCreateWorkloadSkipCAPI()
test.skipMoveManagement()
test.skipInstallEksaComponents()
test.expectInstallGitOpsManager()
test.expectWriteClusterConfig()
test.expectNotDeleteBootstrap()
// test.expectInstallMHC()
test.expectPreflightValidationsToPass()
test.expectCuratedPackagesInstallation()
if err := test.run(); err != nil {
t.Fatalf("Create.Run() err = %v, want err = nil", err)
}
}
func TestCreateWorkloadClusterRunAWSIamConfigFail(t *testing.T) {
wantError := errors.New("test error")
managementKubeconfig := "test.kubeconfig"
test := newCreateTest(t)
test.bootstrapCluster.ExistingManagement = true
test.bootstrapCluster.KubeconfigFile = managementKubeconfig
test.bootstrapCluster.Name = "cluster-name"
test.clusterSpec.ManagementCluster = &types.Cluster{
Name: test.bootstrapCluster.Name,
KubeconfigFile: managementKubeconfig,
ExistingManagement: true,
}
// Adding AWSIAMConfig to cluster spec.
test.clusterSpec.AWSIamConfig = &v1alpha1.AWSIamConfig{}
test.expectSetup()
test.expectPreflightValidationsToPass()
test.clusterManager.EXPECT().CreateAwsIamAuthCaSecret(test.ctx, test.bootstrapCluster, test.clusterSpec.Cluster.Name).Return(wantError)
test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster)
test.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", test.clusterSpec.Cluster.Name), gomock.Any())
if err := test.run(); err == nil {
t.Fatalf("Create.Run() err = %v, want err = %v", err, wantError)
}
}
func TestCreateWorkloadClusterTaskCreateWorkloadClusterFailure(t *testing.T) {
test := newCreateTest(t)
commandContext := task.CommandContext{
BootstrapCluster: test.bootstrapCluster,
ClusterSpec: test.clusterSpec,
Provider: test.provider,
ClusterManager: test.clusterManager,
}
gomock.InOrder(
test.clusterManager.EXPECT().CreateWorkloadCluster(
test.ctx, test.bootstrapCluster, test.clusterSpec, test.provider,
).Return(nil, errors.New("test")),
test.clusterManager.EXPECT().SaveLogsManagementCluster(
test.ctx, test.clusterSpec, test.bootstrapCluster,
),
test.clusterManager.EXPECT().SaveLogsWorkloadCluster(
test.ctx, test.provider, test.clusterSpec, nil,
),
test.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", test.clusterSpec.Cluster.Name), gomock.Any()),
)
err := task.NewTaskRunner(&workflows.CreateWorkloadClusterTask{}, test.writer).RunTask(test.ctx, &commandContext)
if err == nil {
t.Fatalf("expected error from task")
}
}
func TestCreateWorkloadClusterTaskRunPostCreateWorkloadClusterFailure(t *testing.T) {
test := newCreateTest(t)
commandContext := task.CommandContext{
BootstrapCluster: test.bootstrapCluster,
ClusterSpec: test.clusterSpec,
Provider: test.provider,
ClusterManager: test.clusterManager,
}
gomock.InOrder(
test.clusterManager.EXPECT().CreateWorkloadCluster(
test.ctx, test.bootstrapCluster, test.clusterSpec, test.provider,
).Return(test.workloadCluster, nil),
test.clusterManager.EXPECT().InstallNetworking(
test.ctx, test.workloadCluster, test.clusterSpec, test.provider,
),
test.clusterManager.EXPECT().InstallMachineHealthChecks(
test.ctx, test.clusterSpec, test.bootstrapCluster,
),
test.clusterManager.EXPECT().RunPostCreateWorkloadCluster(
test.ctx, test.bootstrapCluster, test.workloadCluster, test.clusterSpec,
).Return(errors.New("test")),
test.clusterManager.EXPECT().SaveLogsManagementCluster(
test.ctx, test.clusterSpec, test.bootstrapCluster,
),
test.clusterManager.EXPECT().SaveLogsWorkloadCluster(
test.ctx, test.provider, test.clusterSpec, test.workloadCluster,
),
test.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", test.clusterSpec.Cluster.Name), gomock.Any()),
)
err := task.NewTaskRunner(&workflows.CreateWorkloadClusterTask{}, test.writer).RunTask(test.ctx, &commandContext)
if err == nil {
t.Fatalf("expected error from task")
}
}
| 499 |
eks-anywhere | aws | Go | package workflows
import (
"context"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/task"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/workflows/interfaces"
)
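// Delete is the workflow that tears down a cluster: it deletes the workload cluster,
// cleans up the GitOps repo and package resources, and removes any bootstrap cluster it created.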
type Delete struct {
bootstrapper interfaces.Bootstrapper
provider providers.Provider
clusterManager interfaces.ClusterManager
gitOpsManager interfaces.GitOpsManager
writer filewriter.FileWriter
}
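// NewDelete builds a Delete workflow from its collaborators.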
func NewDelete(bootstrapper interfaces.Bootstrapper, provider providers.Provider,
clusterManager interfaces.ClusterManager, gitOpsManager interfaces.GitOpsManager,
writer filewriter.FileWriter,
) *Delete {
return &Delete{
bootstrapper: bootstrapper,
provider: provider,
clusterManager: clusterManager,
gitOpsManager: gitOpsManager,
writer: writer,
}
}
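// Run executes the delete workflow. With forceCleanup it first removes any leftover
// bootstrap (kind) cluster named after the workload cluster, then runs the task chain
// starting at setup-and-validate.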
func (c *Delete) Run(ctx context.Context, workloadCluster *types.Cluster, clusterSpec *cluster.Spec, forceCleanup bool, kubeconfig string) error {
if forceCleanup {
if err := c.bootstrapper.DeleteBootstrapCluster(ctx, &types.Cluster{
Name: workloadCluster.Name,
}, constants.Delete, forceCleanup); err != nil {
return err
}
}
commandContext := &task.CommandContext{
Bootstrapper: c.bootstrapper,
Provider: c.provider,
ClusterManager: c.clusterManager,
GitOpsManager: c.gitOpsManager,
WorkloadCluster: workloadCluster,
ClusterSpec: clusterSpec,
}
if clusterSpec.ManagementCluster != nil {
commandContext.BootstrapCluster = clusterSpec.ManagementCluster
}
return task.NewTaskRunner(&setupAndValidate{}, c.writer).RunTask(ctx, commandContext)
}
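// Tasks that make up the delete workflow. Each task's Run returns the next task to
// execute, or nil when the workflow ends.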
type setupAndValidate struct{}
type createManagementCluster struct{}
type installCAPI struct{}
type moveClusterManagement struct{}
type deleteWorkloadCluster struct{}
type cleanupGitRepo struct{}
type deletePackageResources struct{}
type deleteManagementCluster struct{}
func (s *setupAndValidate) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Performing provider setup and validations")
err := commandContext.Provider.SetupAndValidateDeleteCluster(ctx, commandContext.WorkloadCluster, commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return nil
}
return &createManagementCluster{}
}
func (s *setupAndValidate) Name() string {
return "setup-and-validate"
}
func (s *setupAndValidate) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *setupAndValidate) Checkpoint() *task.CompletedTask {
return nil
}
func (s *createManagementCluster) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
if commandContext.BootstrapCluster != nil && commandContext.BootstrapCluster.ExistingManagement {
return &deleteWorkloadCluster{}
}
logger.Info("Creating management cluster")
bootstrapOptions, err := commandContext.Provider.BootstrapClusterOpts(commandContext.ClusterSpec)
if err != nil {
logger.Error(err, "Error getting management options from provider")
commandContext.SetError(err)
return nil
}
bootstrapCluster, err := commandContext.Bootstrapper.CreateBootstrapCluster(ctx, commandContext.ClusterSpec, bootstrapOptions...)
if err != nil {
commandContext.SetError(err)
return &deleteManagementCluster{}
}
commandContext.BootstrapCluster = bootstrapCluster
logger.Info("Provider specific pre-capi-install-setup on bootstrap cluster")
if err = commandContext.Provider.PreCAPIInstallOnBootstrap(ctx, bootstrapCluster, commandContext.ClusterSpec); err != nil {
commandContext.SetError(err)
return &CollectMgmtClusterDiagnosticsTask{}
}
return &installCAPI{}
}
func (s *createManagementCluster) Name() string {
return "management-cluster-init"
}
func (s *createManagementCluster) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *createManagementCluster) Checkpoint() *task.CompletedTask {
return nil
}
func (s *installCAPI) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Installing cluster-api providers on management cluster")
err := commandContext.ClusterManager.InstallCAPI(ctx, commandContext.ClusterSpec, commandContext.BootstrapCluster, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &deleteManagementCluster{}
}
return &moveClusterManagement{}
}
func (s *installCAPI) Name() string {
return "install-capi"
}
func (s *installCAPI) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *installCAPI) Checkpoint() *task.CompletedTask {
return nil
}
func (s *moveClusterManagement) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Moving cluster management from workload cluster")
err := commandContext.ClusterManager.MoveCAPI(ctx, commandContext.WorkloadCluster, commandContext.BootstrapCluster, commandContext.WorkloadCluster.Name, commandContext.ClusterSpec, types.WithNodeRef())
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
return &deleteWorkloadCluster{}
}
func (s *moveClusterManagement) Name() string {
return "cluster-management-move"
}
func (s *moveClusterManagement) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *moveClusterManagement) Checkpoint() *task.CompletedTask {
return nil
}
func (s *deleteWorkloadCluster) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Deleting workload cluster")
err := commandContext.ClusterManager.DeleteCluster(ctx, commandContext.BootstrapCluster, commandContext.WorkloadCluster, commandContext.Provider, commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
return &cleanupGitRepo{}
}
func (s *deleteWorkloadCluster) Name() string {
return "delete-workload-cluster"
}
func (s *deleteWorkloadCluster) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *deleteWorkloadCluster) Checkpoint() *task.CompletedTask {
return nil
}
func (s *cleanupGitRepo) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Clean up Git Repo")
err := commandContext.GitOpsManager.CleanupGitRepo(ctx, commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
return &deletePackageResources{}
}
func (s *cleanupGitRepo) Name() string {
return "clean-up-git-repo"
}
func (s *cleanupGitRepo) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *cleanupGitRepo) Checkpoint() *task.CompletedTask {
return nil
}
func (s *deletePackageResources) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
if !commandContext.BootstrapCluster.ExistingManagement {
return &deleteManagementCluster{}
}
logger.Info("Delete package resources", "clusterName", commandContext.WorkloadCluster.Name)
cluster := commandContext.ManagementCluster
if cluster == nil {
cluster = commandContext.BootstrapCluster
}
err := commandContext.ClusterManager.DeletePackageResources(ctx, cluster, commandContext.WorkloadCluster.Name)
if err != nil {
logger.Info("Problem delete package resources", "error", err)
}
// A bit odd to traverse to this state here, but it is the terminal state
return &deleteManagementCluster{}
}
func (s *deletePackageResources) Name() string {
return "package-resource-delete"
}
func (s *deletePackageResources) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *deletePackageResources) Checkpoint() *task.CompletedTask {
return nil
}
func (s *deleteManagementCluster) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
if commandContext.OriginalError != nil {
collector := &CollectMgmtClusterDiagnosticsTask{}
collector.Run(ctx, commandContext)
}
if commandContext.BootstrapCluster != nil && !commandContext.BootstrapCluster.ExistingManagement {
if err := commandContext.Bootstrapper.DeleteBootstrapCluster(ctx, commandContext.BootstrapCluster, constants.Delete, false); err != nil {
commandContext.SetError(err)
}
return nil
}
logger.Info("Bootstrap cluster information missing - skipping delete kind cluster")
if commandContext.OriginalError == nil {
logger.MarkSuccess("Cluster deleted!")
}
return nil
}
func (s *deleteManagementCluster) Name() string {
return "kind-cluster-delete"
}
func (s *deleteManagementCluster) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *deleteManagementCluster) Checkpoint() *task.CompletedTask {
return nil
}
| 290 |
eks-anywhere | aws | Go | package workflows_test
import (
"context"
"fmt"
"testing"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/filewriter"
providermocks "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/workflows"
"github.com/aws/eks-anywhere/pkg/workflows/interfaces/mocks"
)
type deleteTestSetup struct {
t *testing.T
bootstrapper *mocks.MockBootstrapper
clusterManager *mocks.MockClusterManager
gitOpsManager *mocks.MockGitOpsManager
writer filewriter.FileWriter
provider *providermocks.MockProvider
workflow *workflows.Delete
ctx context.Context
clusterSpec *cluster.Spec
forceCleanup bool
bootstrapCluster *types.Cluster
workloadCluster *types.Cluster
}
func newDeleteTest(t *testing.T) *deleteTestSetup {
mockCtrl := gomock.NewController(t)
mockBootstrapper := mocks.NewMockBootstrapper(mockCtrl)
clusterManager := mocks.NewMockClusterManager(mockCtrl)
gitOpsManager := mocks.NewMockGitOpsManager(mockCtrl)
_, writer := test.NewWriter(t)
provider := providermocks.NewMockProvider(mockCtrl)
workflow := workflows.NewDelete(mockBootstrapper, provider, clusterManager, gitOpsManager, writer)
return &deleteTestSetup{
t: t,
bootstrapper: mockBootstrapper,
clusterManager: clusterManager,
gitOpsManager: gitOpsManager,
provider: provider,
workflow: workflow,
writer: writer,
ctx: context.Background(),
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "cluster-name" }),
bootstrapCluster: &types.Cluster{Name: "bootstrap"},
workloadCluster: &types.Cluster{Name: "workload"},
}
}
func (c *deleteTestSetup) expectSetup() {
c.provider.EXPECT().SetupAndValidateDeleteCluster(c.ctx, c.workloadCluster, c.clusterSpec)
}
func (c *deleteTestSetup) expectCreateBootstrap() {
opts := []bootstrapper.BootstrapClusterOption{
bootstrapper.WithExtraDockerMounts(),
}
gomock.InOrder(
c.provider.EXPECT().BootstrapClusterOpts(c.clusterSpec).Return(opts, nil),
c.bootstrapper.EXPECT().CreateBootstrapCluster(
c.ctx, gomock.Not(gomock.Nil()), gomock.Not(gomock.Nil()),
).Return(c.bootstrapCluster, nil),
c.provider.EXPECT().PreCAPIInstallOnBootstrap(c.ctx, c.bootstrapCluster, c.clusterSpec),
c.clusterManager.EXPECT().InstallCAPI(c.ctx, gomock.Not(gomock.Nil()), c.bootstrapCluster, c.provider),
)
}
func (c *deleteTestSetup) expectNotToCreateBootstrap() {
opts := []bootstrapper.BootstrapClusterOption{
bootstrapper.WithExtraDockerMounts(),
}
c.provider.EXPECT().BootstrapClusterOpts(c.clusterSpec).Return(opts, nil).Times(0)
c.bootstrapper.EXPECT().CreateBootstrapCluster(
c.ctx, gomock.Not(gomock.Nil()), gomock.Not(gomock.Nil()),
).Return(c.bootstrapCluster, nil).Times(0)
c.clusterManager.EXPECT().InstallCAPI(c.ctx, gomock.Not(gomock.Nil()), c.bootstrapCluster, c.provider).Times(0)
}
func (c *deleteTestSetup) expectDeletePackageResources() {
c.clusterManager.EXPECT().DeletePackageResources(c.ctx, c.clusterSpec.ManagementCluster, gomock.Any()).Return(nil)
}
func (c *deleteTestSetup) expectNotToDeletePackageResources() {
c.clusterManager.EXPECT().DeletePackageResources(c.ctx, c.clusterSpec.ManagementCluster, gomock.Any()).Return(nil).Times(0)
}
func (c *deleteTestSetup) expectDeleteBootstrap() {
gomock.InOrder(
c.bootstrapper.EXPECT().DeleteBootstrapCluster(
c.ctx, c.bootstrapCluster,
gomock.Any(),
gomock.Any()).Return(nil),
)
}
func (c *deleteTestSetup) expectNotToDeleteBootstrap() {
c.bootstrapper.EXPECT().DeleteBootstrapCluster(c.ctx, c.bootstrapCluster, gomock.Any(), gomock.Any()).Return(nil).Times(0)
}
func (c *deleteTestSetup) expectDeleteWorkload(cluster *types.Cluster) {
gomock.InOrder(
c.clusterManager.EXPECT().DeleteCluster(
c.ctx, cluster, c.workloadCluster, c.provider, c.clusterSpec,
).Return(nil),
)
}
func (c *deleteTestSetup) expectCleanupGitRepo() {
gomock.InOrder(
c.gitOpsManager.EXPECT().CleanupGitRepo(
c.ctx, c.clusterSpec,
).Return(nil),
)
}
func (c *deleteTestSetup) expectMoveManagement() {
gomock.InOrder(
c.clusterManager.EXPECT().MoveCAPI(
c.ctx, c.workloadCluster, c.bootstrapCluster, c.workloadCluster.Name, c.clusterSpec, gomock.Any(),
),
)
}
func (c *deleteTestSetup) expectNotToMoveManagement() {
gomock.InOrder(
c.clusterManager.EXPECT().MoveCAPI(
c.ctx, c.workloadCluster, c.bootstrapCluster, c.workloadCluster.Name, gomock.Any(),
).Times(0),
)
}
func (c *deleteTestSetup) run() error {
	// Delete.Run(ctx, workloadCluster, clusterSpec, forceCleanup, kubeconfig)
return c.workflow.Run(c.ctx, c.workloadCluster, c.clusterSpec, c.forceCleanup, "")
}
func TestDeleteRunSuccess(t *testing.T) {
test := newDeleteTest(t)
test.expectSetup()
test.expectCreateBootstrap()
test.expectDeleteWorkload(test.bootstrapCluster)
test.expectCleanupGitRepo()
test.expectMoveManagement()
test.expectNotToDeletePackageResources()
test.expectDeleteBootstrap()
err := test.run()
if err != nil {
t.Fatalf("Delete.Run() err = %v, want err = nil", err)
}
}
func TestDeleteWorkloadRunSuccess(t *testing.T) {
test := newDeleteTest(t)
test.expectSetup()
test.expectNotToCreateBootstrap()
test.clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
test.clusterSpec.Cluster.SetManagedBy(test.clusterSpec.ManagementCluster.Name)
test.expectDeleteWorkload(test.clusterSpec.ManagementCluster)
test.expectCleanupGitRepo()
test.expectNotToMoveManagement()
test.expectDeletePackageResources()
test.expectNotToDeleteBootstrap()
err := test.run()
if err != nil {
t.Fatalf("Delete.Run() err = %v, want err = nil", err)
}
}
func TestDeleteWorkloadDeletePackageResourceError(t *testing.T) {
test := newDeleteTest(t)
test.expectSetup()
test.expectNotToCreateBootstrap()
test.clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
test.clusterSpec.Cluster.SetManagedBy(test.clusterSpec.ManagementCluster.Name)
test.expectDeleteWorkload(test.clusterSpec.ManagementCluster)
test.expectCleanupGitRepo()
test.expectNotToMoveManagement()
test.clusterManager.EXPECT().DeletePackageResources(test.ctx, test.clusterSpec.ManagementCluster, gomock.Any()).Return(fmt.Errorf("boom"))
test.expectNotToDeleteBootstrap()
err := test.run()
if err != nil {
t.Fatalf("Delete.Run() err = %v, want err = nil", err)
}
}
| 209 |
eks-anywhere | aws | Go | package workflows
import (
"context"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/task"
)
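// CollectDiagnosticsTask collects diagnostics from both the management and the workload cluster.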
type CollectDiagnosticsTask struct {
*CollectWorkloadClusterDiagnosticsTask
*CollectMgmtClusterDiagnosticsTask
}
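// CollectWorkloadClusterDiagnosticsTask saves logs from the workload cluster.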
type CollectWorkloadClusterDiagnosticsTask struct{}
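// CollectMgmtClusterDiagnosticsTask saves logs from the management (bootstrap) cluster.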
type CollectMgmtClusterDiagnosticsTask struct{}
// CollectDiagnosticsTask implementation
func (s *CollectDiagnosticsTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("collecting cluster diagnostics")
_ = s.CollectMgmtClusterDiagnosticsTask.Run(ctx, commandContext)
_ = s.CollectWorkloadClusterDiagnosticsTask.Run(ctx, commandContext)
return nil
}
func (s *CollectDiagnosticsTask) Name() string {
return "collect-cluster-diagnostics"
}
func (s *CollectDiagnosticsTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return s.Run(ctx, commandContext), nil
}
func (s *CollectDiagnosticsTask) Checkpoint() *task.CompletedTask {
return nil
}
// CollectWorkloadClusterDiagnosticsTask implementation
func (s *CollectWorkloadClusterDiagnosticsTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("collecting workload cluster diagnostics")
_ = commandContext.ClusterManager.SaveLogsWorkloadCluster(ctx, commandContext.Provider, commandContext.ClusterSpec, commandContext.WorkloadCluster)
return nil
}
func (s *CollectWorkloadClusterDiagnosticsTask) Name() string {
return "collect-workload-cluster-diagnostics"
}
// CollectMgmtClusterDiagnosticsTask implementation
func (s *CollectMgmtClusterDiagnosticsTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("collecting management cluster diagnostics")
_ = commandContext.ClusterManager.SaveLogsManagementCluster(ctx, commandContext.ClusterSpec, commandContext.BootstrapCluster)
return nil
}
func (s *CollectMgmtClusterDiagnosticsTask) Name() string {
return "collect-management-cluster-diagnostics"
}
func (s *CollectMgmtClusterDiagnosticsTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return nil, nil
}
func (s *CollectMgmtClusterDiagnosticsTask) Checkpoint() *task.CompletedTask {
return nil
}
| 71 |
eks-anywhere | aws | Go | package workflows
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clustermarshaller"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/features"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/task"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
"github.com/aws/eks-anywhere/pkg/workflows/interfaces"
)
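// Upgrade is the workflow that upgrades an existing cluster and its core components.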
type Upgrade struct {
bootstrapper interfaces.Bootstrapper
provider providers.Provider
clusterManager interfaces.ClusterManager
gitOpsManager interfaces.GitOpsManager
writer filewriter.FileWriter
capiManager interfaces.CAPIManager
eksdInstaller interfaces.EksdInstaller
eksdUpgrader interfaces.EksdUpgrader
clusterUpgrader interfaces.ClusterUpgrader
upgradeChangeDiff *types.ChangeDiff
}
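// NewUpgrade builds an Upgrade workflow from its collaborators.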
func NewUpgrade(bootstrapper interfaces.Bootstrapper, provider providers.Provider,
capiManager interfaces.CAPIManager,
clusterManager interfaces.ClusterManager,
gitOpsManager interfaces.GitOpsManager,
writer filewriter.FileWriter,
eksdUpgrader interfaces.EksdUpgrader,
eksdInstaller interfaces.EksdInstaller,
clusterUpgrader interfaces.ClusterUpgrader,
) *Upgrade {
upgradeChangeDiff := types.NewChangeDiff()
return &Upgrade{
bootstrapper: bootstrapper,
provider: provider,
clusterManager: clusterManager,
gitOpsManager: gitOpsManager,
writer: writer,
capiManager: capiManager,
eksdUpgrader: eksdUpgrader,
eksdInstaller: eksdInstaller,
clusterUpgrader: clusterUpgrader,
upgradeChangeDiff: upgradeChangeDiff,
}
}
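// Run executes the upgrade workflow task chain, optionally resuming from a checkpoint
// file when the checkpoint feature flag is enabled.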
func (c *Upgrade) Run(ctx context.Context, clusterSpec *cluster.Spec, managementCluster *types.Cluster, workloadCluster *types.Cluster, validator interfaces.Validator, forceCleanup bool) error {
commandContext := &task.CommandContext{
Bootstrapper: c.bootstrapper,
Provider: c.provider,
ClusterManager: c.clusterManager,
GitOpsManager: c.gitOpsManager,
ManagementCluster: managementCluster,
WorkloadCluster: workloadCluster,
ClusterSpec: clusterSpec,
Validations: validator,
Writer: c.writer,
CAPIManager: c.capiManager,
EksdInstaller: c.eksdInstaller,
EksdUpgrader: c.eksdUpgrader,
ClusterUpgrader: c.clusterUpgrader,
UpgradeChangeDiff: c.upgradeChangeDiff,
ForceCleanup: forceCleanup,
}
if features.IsActive(features.CheckpointEnabled()) {
return task.NewTaskRunner(&setupAndValidateTasks{}, c.writer, task.WithCheckpointFile()).RunTask(ctx, commandContext)
}
return task.NewTaskRunner(&setupAndValidateTasks{}, c.writer).RunTask(ctx, commandContext)
}
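// Tasks that make up the upgrade workflow. Each task's Run returns the next task to
// execute (or nil to stop); Checkpoint and Restore allow an interrupted upgrade to
// resume from the last completed task.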
type setupAndValidateTasks struct{}
type updateSecrets struct{}
type ensureEtcdCAPIComponentsExistTask struct{}
type upgradeCoreComponents struct {
UpgradeChangeDiff *types.ChangeDiff
}
type upgradeNeeded struct{}
type pauseEksaReconcile struct{}
type createBootstrapClusterTask struct {
bootstrapCluster *types.Cluster
}
type installCAPITask struct{}
type moveManagementToBootstrapTask struct{}
type moveManagementToWorkloadTask struct{}
type upgradeWorkloadClusterTask struct{}
type deleteBootstrapClusterTask struct {
*CollectDiagnosticsTask
}
type updateClusterAndGitResources struct{}
// reconcileClusterDefinitions updates all the places that have a cluster definition to follow the cluster config provided to this workflow:
// the eks-a objects in the management cluster and the cluster config in the git repo if GitOps is enabled. It also resumes the eks-a controller
// manager and GitOps reconciliations.
type reconcileClusterDefinitions struct {
eksaSpecDiff bool
}
type writeClusterConfigTask struct{}
func (s *setupAndValidateTasks) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Performing setup and validations")
currentSpec, err := commandContext.ClusterManager.GetCurrentClusterSpec(ctx, commandContext.ManagementCluster, commandContext.ClusterSpec.Cluster.Name)
if err != nil {
commandContext.SetError(err)
return nil
}
commandContext.CurrentClusterSpec = currentSpec
runner := validations.NewRunner()
runner.Register(s.providerValidation(ctx, commandContext)...)
runner.Register(commandContext.Validations.PreflightValidations(ctx)...)
err = runner.Run()
if err != nil {
commandContext.SetError(err)
return nil
}
return &updateSecrets{}
}
func (s *setupAndValidateTasks) providerValidation(ctx context.Context, commandContext *task.CommandContext) []validations.Validation {
return []validations.Validation{
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: fmt.Sprintf("%s provider validation", commandContext.Provider.Name()),
Err: commandContext.Provider.SetupAndValidateUpgradeCluster(ctx, commandContext.ManagementCluster, commandContext.ClusterSpec, commandContext.CurrentClusterSpec),
}
},
}
}
func (s *setupAndValidateTasks) Name() string {
return "setup-and-validate"
}
func (s *setupAndValidateTasks) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
if err := commandContext.Provider.SetupAndValidateUpgradeCluster(ctx, commandContext.ManagementCluster, commandContext.ClusterSpec, commandContext.CurrentClusterSpec); err != nil {
commandContext.SetError(err)
return nil, err
}
logger.Info(fmt.Sprintf("%s Provider setup is valid", commandContext.Provider.Name()))
currentSpec, err := commandContext.ClusterManager.GetCurrentClusterSpec(ctx, commandContext.ManagementCluster, commandContext.ClusterSpec.Cluster.Name)
if err != nil {
commandContext.SetError(err)
return nil, err
}
commandContext.CurrentClusterSpec = currentSpec
return &updateSecrets{}, nil
}
func (s *setupAndValidateTasks) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: nil,
}
}
func (s *updateSecrets) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
err := commandContext.Provider.UpdateSecrets(ctx, commandContext.ManagementCluster, commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
return &ensureEtcdCAPIComponentsExistTask{}
}
func (s *updateSecrets) Name() string {
return "update-secrets"
}
func (s *updateSecrets) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: nil,
}
}
func (s *updateSecrets) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return &ensureEtcdCAPIComponentsExistTask{}, nil
}
func (s *ensureEtcdCAPIComponentsExistTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Ensuring etcd CAPI providers exist on management cluster before upgrade")
if err := commandContext.CAPIManager.EnsureEtcdProvidersInstallation(ctx, commandContext.ManagementCluster, commandContext.Provider, commandContext.CurrentClusterSpec); err != nil {
commandContext.SetError(err)
return nil
}
return &pauseEksaReconcile{}
}
func (s *ensureEtcdCAPIComponentsExistTask) Name() string {
return "ensure-etcd-capi-components-exist"
}
func (s *ensureEtcdCAPIComponentsExistTask) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: nil,
}
}
func (s *ensureEtcdCAPIComponentsExistTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return &pauseEksaReconcile{}, nil
}
func (s *upgradeCoreComponents) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Upgrading core components")
err := commandContext.Provider.PreCoreComponentsUpgrade(
ctx,
commandContext.ManagementCluster,
commandContext.ClusterSpec,
)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
changeDiff, err := commandContext.ClusterManager.UpgradeNetworking(ctx, commandContext.WorkloadCluster, commandContext.CurrentClusterSpec, commandContext.ClusterSpec, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
commandContext.UpgradeChangeDiff.Append(changeDiff)
changeDiff, err = commandContext.CAPIManager.Upgrade(ctx, commandContext.ManagementCluster, commandContext.Provider, commandContext.CurrentClusterSpec, commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
commandContext.UpgradeChangeDiff.Append(changeDiff)
if err = commandContext.GitOpsManager.Install(ctx, commandContext.ManagementCluster, commandContext.CurrentClusterSpec, commandContext.ClusterSpec); err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
changeDiff, err = commandContext.GitOpsManager.Upgrade(ctx, commandContext.ManagementCluster, commandContext.CurrentClusterSpec, commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
commandContext.UpgradeChangeDiff.Append(changeDiff)
changeDiff, err = commandContext.ClusterManager.Upgrade(ctx, commandContext.ManagementCluster, commandContext.CurrentClusterSpec, commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
commandContext.UpgradeChangeDiff.Append(changeDiff)
changeDiff, err = commandContext.EksdUpgrader.Upgrade(ctx, commandContext.ManagementCluster, commandContext.CurrentClusterSpec, commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
commandContext.UpgradeChangeDiff.Append(changeDiff)
s.UpgradeChangeDiff = commandContext.UpgradeChangeDiff
return &upgradeNeeded{}
}
func (s *upgradeCoreComponents) Name() string {
return "upgrade-core-components"
}
func (s *upgradeCoreComponents) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: s.UpgradeChangeDiff,
}
}
func (s *upgradeCoreComponents) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
s.UpgradeChangeDiff = &types.ChangeDiff{}
if err := task.UnmarshalTaskCheckpoint(completedTask.Checkpoint, s.UpgradeChangeDiff); err != nil {
return nil, err
}
commandContext.UpgradeChangeDiff = s.UpgradeChangeDiff
return &upgradeNeeded{}, nil
}
func (s *upgradeNeeded) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
newSpec := commandContext.ClusterSpec
if upgradeNeeded, err := commandContext.Provider.UpgradeNeeded(ctx, newSpec, commandContext.CurrentClusterSpec, commandContext.ManagementCluster); err != nil {
commandContext.SetError(err)
return nil
} else if upgradeNeeded {
logger.V(3).Info("Provider needs a cluster upgrade")
return &createBootstrapClusterTask{}
}
diff, err := commandContext.ClusterManager.EKSAClusterSpecChanged(ctx, commandContext.ManagementCluster, newSpec)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
if !diff {
logger.Info("No upgrades needed from cluster spec")
return &reconcileClusterDefinitions{eksaSpecDiff: false}
}
return &createBootstrapClusterTask{}
}
func (s *upgradeNeeded) Name() string {
return "upgrade-needed"
}
func (s *upgradeNeeded) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: nil,
}
}
func (s *upgradeNeeded) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return &createBootstrapClusterTask{}, nil
}
func (s *pauseEksaReconcile) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Pausing EKS-A cluster controller reconcile")
err := commandContext.ClusterManager.PauseEKSAControllerReconcile(ctx, commandContext.ManagementCluster, commandContext.CurrentClusterSpec, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.Info("Pausing GitOps cluster resources reconcile")
err = commandContext.GitOpsManager.PauseClusterResourcesReconcile(ctx, commandContext.ManagementCluster, commandContext.ClusterSpec, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
return &upgradeCoreComponents{}
}
func (s *pauseEksaReconcile) Name() string {
return "pause-controllers-reconcile"
}
func (s *pauseEksaReconcile) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: nil,
}
}
func (s *pauseEksaReconcile) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return &upgradeCoreComponents{}, nil
}
func (s *createBootstrapClusterTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
if commandContext.ForceCleanup {
if err := commandContext.Bootstrapper.DeleteBootstrapCluster(ctx, &types.Cluster{
Name: commandContext.ClusterSpec.Cluster.Name,
}, constants.Upgrade, commandContext.ForceCleanup); err != nil {
commandContext.SetError(err)
return nil
}
}
if commandContext.ManagementCluster != nil && commandContext.ManagementCluster.ExistingManagement {
return &upgradeWorkloadClusterTask{}
}
logger.Info("Creating bootstrap cluster")
bootstrapOptions, err := commandContext.Provider.BootstrapClusterOpts(commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return nil
}
bootstrapCluster, err := commandContext.Bootstrapper.CreateBootstrapCluster(ctx, commandContext.ClusterSpec, bootstrapOptions...)
commandContext.BootstrapCluster = bootstrapCluster
if err != nil {
commandContext.SetError(err)
return &deleteBootstrapClusterTask{}
}
logger.Info("Provider specific pre-capi-install-setup on bootstrap cluster")
if err = commandContext.Provider.PreCAPIInstallOnBootstrap(ctx, bootstrapCluster, commandContext.ClusterSpec); err != nil {
commandContext.SetError(err)
return &CollectMgmtClusterDiagnosticsTask{}
}
logger.Info("Provider specific post-setup")
if err = commandContext.Provider.PostBootstrapSetupUpgrade(ctx, commandContext.ClusterSpec.Cluster, bootstrapCluster); err != nil {
commandContext.SetError(err)
return &CollectMgmtClusterDiagnosticsTask{}
}
s.bootstrapCluster = bootstrapCluster
return &installCAPITask{}
}
func (s *createBootstrapClusterTask) Name() string {
return "bootstrap-cluster-init"
}
func (s *createBootstrapClusterTask) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: s.bootstrapCluster,
}
}
func (s *createBootstrapClusterTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
s.bootstrapCluster = &types.Cluster{}
if err := task.UnmarshalTaskCheckpoint(completedTask.Checkpoint, s.bootstrapCluster); err != nil {
return nil, err
}
commandContext.BootstrapCluster = s.bootstrapCluster
if commandContext.ManagementCluster != nil && commandContext.ManagementCluster.ExistingManagement {
return &upgradeWorkloadClusterTask{}, nil
}
return &installCAPITask{}, nil
}
func (s *installCAPITask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Installing cluster-api providers on bootstrap cluster")
err := commandContext.ClusterManager.InstallCAPI(ctx, commandContext.ClusterSpec, commandContext.BootstrapCluster, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &CollectMgmtClusterDiagnosticsTask{}
}
return &moveManagementToBootstrapTask{}
}
func (s *installCAPITask) Name() string {
return "install-capi"
}
func (s *installCAPITask) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: nil,
}
}
func (s *installCAPITask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return &moveManagementToBootstrapTask{}, nil
}
func (s *moveManagementToBootstrapTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Backing up workload cluster's management resources before moving to bootstrap cluster")
err := commandContext.ClusterManager.BackupCAPI(ctx, commandContext.WorkloadCluster, commandContext.ManagementClusterStateDir)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.V(3).Info("Pausing workload clusters before moving management cluster resources to bootstrap cluster")
err = commandContext.ClusterManager.PauseCAPIWorkloadClusters(ctx, commandContext.WorkloadCluster)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.Info("Moving management cluster from workload to bootstrap cluster")
err = commandContext.ClusterManager.MoveCAPI(ctx, commandContext.WorkloadCluster, commandContext.BootstrapCluster, commandContext.WorkloadCluster.Name, commandContext.ClusterSpec, types.WithNodeRef(), types.WithNodeHealthy())
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.V(3).Info("Provider specific post management move")
err = commandContext.Provider.PostMoveManagementToBootstrap(ctx, commandContext.BootstrapCluster)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
commandContext.ManagementCluster = commandContext.BootstrapCluster
return &upgradeWorkloadClusterTask{}
}
func (s *moveManagementToBootstrapTask) Name() string {
return "capi-management-move-to-bootstrap"
}
func (s *moveManagementToBootstrapTask) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: nil,
}
}
func (s *moveManagementToBootstrapTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
commandContext.ManagementCluster = commandContext.BootstrapCluster
return &upgradeWorkloadClusterTask{}, nil
}
func (s *upgradeWorkloadClusterTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
eksaManagementCluster := commandContext.WorkloadCluster
if commandContext.ManagementCluster != nil && commandContext.ManagementCluster.ExistingManagement {
eksaManagementCluster = commandContext.ManagementCluster
}
if err := commandContext.ClusterUpgrader.PrepareUpgrade(
ctx,
commandContext.ClusterSpec,
commandContext.ManagementCluster.KubeconfigFile,
commandContext.WorkloadCluster.KubeconfigFile,
); err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.Info("Upgrading workload cluster")
err := commandContext.ClusterManager.UpgradeCluster(ctx, commandContext.ManagementCluster, commandContext.WorkloadCluster, commandContext.ClusterSpec, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
if err := commandContext.ClusterUpgrader.CleanupAfterUpgrade(
ctx,
commandContext.ClusterSpec,
commandContext.ManagementCluster.KubeconfigFile,
commandContext.WorkloadCluster.KubeconfigFile,
); err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
if commandContext.UpgradeChangeDiff.Changed() {
if err = commandContext.ClusterManager.ApplyBundles(ctx, commandContext.ClusterSpec, eksaManagementCluster); err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
}
return &moveManagementToWorkloadTask{}
}
func (s *upgradeWorkloadClusterTask) Name() string {
return "upgrade-workload-cluster"
}
func (s *upgradeWorkloadClusterTask) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: nil,
}
}
func (s *upgradeWorkloadClusterTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return &moveManagementToWorkloadTask{}, nil
}
func (s *moveManagementToWorkloadTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
if commandContext.ManagementCluster.ExistingManagement {
return &reconcileClusterDefinitions{eksaSpecDiff: true}
}
logger.Info("Moving cluster management from bootstrap to workload cluster")
err := commandContext.ClusterManager.MoveCAPI(ctx, commandContext.BootstrapCluster, commandContext.WorkloadCluster, commandContext.WorkloadCluster.Name, commandContext.ClusterSpec, types.WithNodeRef(), types.WithNodeHealthy())
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
commandContext.ManagementCluster = commandContext.WorkloadCluster
logger.V(3).Info("Resuming all workload clusters after moving management cluster resources from bootstrap to management clusters")
err = commandContext.ClusterManager.ResumeCAPIWorkloadClusters(ctx, commandContext.ManagementCluster)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
return &reconcileClusterDefinitions{eksaSpecDiff: true}
}
func (s *moveManagementToWorkloadTask) Name() string {
return "capi-management-move-to-workload"
}
func (s *moveManagementToWorkloadTask) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: nil,
}
}
func (s *moveManagementToWorkloadTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
if !commandContext.ManagementCluster.ExistingManagement {
commandContext.ManagementCluster = commandContext.WorkloadCluster
}
return &reconcileClusterDefinitions{eksaSpecDiff: true}, nil
}
func (s *reconcileClusterDefinitions) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Updating EKS-A cluster resource")
datacenterConfig := commandContext.Provider.DatacenterConfig(commandContext.ClusterSpec)
machineConfigs := commandContext.Provider.MachineConfigs(commandContext.ClusterSpec)
err := commandContext.ClusterManager.CreateEKSAResources(ctx, commandContext.ManagementCluster, commandContext.ClusterSpec, datacenterConfig, machineConfigs)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
err = commandContext.EksdInstaller.InstallEksdManifest(ctx, commandContext.ClusterSpec, commandContext.ManagementCluster)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.Info("Resuming EKS-A controller reconcile")
err = commandContext.ClusterManager.ResumeEKSAControllerReconcile(ctx, commandContext.ManagementCluster, commandContext.ClusterSpec, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.Info("Updating Git Repo with new EKS-A cluster spec")
err = commandContext.GitOpsManager.UpdateGitEksaSpec(ctx, commandContext.ClusterSpec, datacenterConfig, machineConfigs)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.Info("Forcing reconcile Git repo with latest commit")
err = commandContext.GitOpsManager.ForceReconcileGitRepo(ctx, commandContext.ManagementCluster, commandContext.ClusterSpec)
if err != nil {
commandContext.SetError(err)
return &CollectDiagnosticsTask{}
}
logger.Info("Resuming GitOps cluster resources kustomization")
err = commandContext.GitOpsManager.ResumeClusterResourcesReconcile(ctx, commandContext.ManagementCluster, commandContext.ClusterSpec, commandContext.Provider)
if err != nil {
commandContext.SetError(err)
return &writeClusterConfigTask{}
}
if !s.eksaSpecDiff {
return nil
}
return &writeClusterConfigTask{}
}
func (s *reconcileClusterDefinitions) Name() string {
return "resume-eksa-and-gitops-kustomization"
}
func (s *reconcileClusterDefinitions) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: nil,
}
}
func (s *reconcileClusterDefinitions) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return &writeClusterConfigTask{}, nil
}
func (s *writeClusterConfigTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
logger.Info("Writing cluster config file")
err := clustermarshaller.WriteClusterConfig(commandContext.ClusterSpec, commandContext.Provider.DatacenterConfig(commandContext.ClusterSpec), commandContext.Provider.MachineConfigs(commandContext.ClusterSpec), commandContext.Writer)
if err != nil {
commandContext.SetError(err)
}
return &deleteBootstrapClusterTask{}
}
func (s *writeClusterConfigTask) Name() string {
return "write-cluster-config"
}
func (s *writeClusterConfigTask) Checkpoint() *task.CompletedTask {
return &task.CompletedTask{
Checkpoint: nil,
}
}
func (s *writeClusterConfigTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) {
return &deleteBootstrapClusterTask{}, nil
}
func (s *deleteBootstrapClusterTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
if commandContext.OriginalError != nil {
c := CollectDiagnosticsTask{}
c.Run(ctx, commandContext)
}
if commandContext.BootstrapCluster != nil && !commandContext.BootstrapCluster.ExistingManagement {
if err := commandContext.Bootstrapper.DeleteBootstrapCluster(ctx, commandContext.BootstrapCluster, constants.Upgrade, false); err != nil {
commandContext.SetError(err)
}
if commandContext.OriginalError == nil {
logger.MarkSuccess("Cluster upgraded!")
}
if err := commandContext.Provider.PostBootstrapDeleteForUpgrade(ctx); err != nil {
			// Cluster has been successfully upgraded and the bootstrap cluster successfully deleted.
			// We don't necessarily need to return with an error here and abort.
logger.Info(fmt.Sprintf("%v", err))
}
capiObjectFile := filepath.Join(commandContext.BootstrapCluster.Name, commandContext.ManagementClusterStateDir)
if err := os.RemoveAll(capiObjectFile); err != nil {
logger.Info(fmt.Sprintf("management cluster CAPI backup file not found: %v", err))
}
return nil
}
logger.Info("Bootstrap cluster information missing - skipping delete kind cluster")
if commandContext.OriginalError == nil {
logger.MarkSuccess("Cluster upgraded!")
}
return nil
}
func (s *deleteBootstrapClusterTask) Name() string {
return "delete-kind-cluster"
}
| 726 |
eks-anywhere | aws | Go | package workflows_test
import (
"context"
"errors"
"fmt"
"os"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/features"
writermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
"github.com/aws/eks-anywhere/pkg/providers"
providermocks "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/workflows"
"github.com/aws/eks-anywhere/pkg/workflows/interfaces/mocks"
)
type upgradeTestSetup struct {
t *testing.T
bootstrapper *mocks.MockBootstrapper
clusterManager *mocks.MockClusterManager
gitOpsManager *mocks.MockGitOpsManager
provider *providermocks.MockProvider
writer *writermocks.MockFileWriter
validator *mocks.MockValidator
eksdInstaller *mocks.MockEksdInstaller
eksdUpgrader *mocks.MockEksdUpgrader
capiManager *mocks.MockCAPIManager
clusterUpgrader *mocks.MockClusterUpgrader
datacenterConfig providers.DatacenterConfig
machineConfigs []providers.MachineConfig
workflow *workflows.Upgrade
ctx context.Context
newClusterSpec *cluster.Spec
currentClusterSpec *cluster.Spec
forceCleanup bool
bootstrapCluster *types.Cluster
workloadCluster *types.Cluster
managementCluster *types.Cluster
managementStatePath string
}
func newUpgradeTest(t *testing.T) *upgradeTestSetup {
featureEnvVars := []string{}
mockCtrl := gomock.NewController(t)
bootstrapper := mocks.NewMockBootstrapper(mockCtrl)
clusterManager := mocks.NewMockClusterManager(mockCtrl)
gitOpsManager := mocks.NewMockGitOpsManager(mockCtrl)
provider := providermocks.NewMockProvider(mockCtrl)
writer := writermocks.NewMockFileWriter(mockCtrl)
validator := mocks.NewMockValidator(mockCtrl)
eksdInstaller := mocks.NewMockEksdInstaller(mockCtrl)
eksdUpgrader := mocks.NewMockEksdUpgrader(mockCtrl)
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{}
capiUpgrader := mocks.NewMockCAPIManager(mockCtrl)
machineConfigs := []providers.MachineConfig{&v1alpha1.VSphereMachineConfig{}}
clusterUpgrader := mocks.NewMockClusterUpgrader(mockCtrl)
workflow := workflows.NewUpgrade(
bootstrapper,
provider,
capiUpgrader,
clusterManager,
gitOpsManager,
writer,
eksdUpgrader,
eksdInstaller,
clusterUpgrader,
)
for _, e := range featureEnvVars {
t.Setenv(e, "true")
}
return &upgradeTestSetup{
t: t,
bootstrapper: bootstrapper,
clusterManager: clusterManager,
gitOpsManager: gitOpsManager,
provider: provider,
writer: writer,
validator: validator,
eksdInstaller: eksdInstaller,
eksdUpgrader: eksdUpgrader,
capiManager: capiUpgrader,
clusterUpgrader: clusterUpgrader,
datacenterConfig: datacenterConfig,
machineConfigs: machineConfigs,
workflow: workflow,
ctx: context.Background(),
newClusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "cluster-name" }),
workloadCluster: &types.Cluster{Name: "workload"},
managementStatePath: fmt.Sprintf("cluster-state-backup-%s", time.Now().Format("2006-01-02T15_04_05")),
}
}
func (c *upgradeTestSetup) WithForceCleanup() *upgradeTestSetup {
c.forceCleanup = true
return c
}
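// newUpgradeSelfManagedClusterTest models upgrading a standalone cluster: the workload cluster acts
// as its own management cluster, so the upgrade runs through a temporary kind bootstrap cluster.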
func newUpgradeSelfManagedClusterTest(t *testing.T) *upgradeTestSetup {
tt := newUpgradeTest(t)
tt.bootstrapCluster = &types.Cluster{
Name: "bootstrap",
ExistingManagement: false,
KubeconfigFile: "kubeconfig.yaml",
}
tt.managementCluster = tt.workloadCluster
return tt
}
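// newUpgradeManagedClusterTest models upgrading a workload cluster that is managed by a separate,
// already existing management cluster, so no bootstrap cluster is needed.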
func newUpgradeManagedClusterTest(t *testing.T) *upgradeTestSetup {
tt := newUpgradeTest(t)
tt.managementCluster = &types.Cluster{
Name: "management-cluster",
ExistingManagement: true,
KubeconfigFile: "kubeconfig.yaml",
}
tt.workloadCluster.KubeconfigFile = "wl-kubeconfig.yaml"
tt.newClusterSpec.Cluster.SetSelfManaged()
tt.newClusterSpec.ManagementCluster = tt.managementCluster
return tt
}
func (c *upgradeTestSetup) expectSetup() {
c.provider.EXPECT().SetupAndValidateUpgradeCluster(c.ctx, gomock.Any(), c.newClusterSpec, c.currentClusterSpec)
c.provider.EXPECT().Name()
c.clusterManager.EXPECT().GetCurrentClusterSpec(c.ctx, gomock.Any(), c.newClusterSpec.Cluster.Name).Return(c.currentClusterSpec, nil)
}
func (c *upgradeTestSetup) expectSetupToFail() {
c.clusterManager.EXPECT().GetCurrentClusterSpec(c.ctx, gomock.Any(), c.newClusterSpec.Cluster.Name).Return(nil, errors.New("failed setup"))
}
func (c *upgradeTestSetup) expectUpdateSecrets(expectedCluster *types.Cluster) {
gomock.InOrder(
c.provider.EXPECT().UpdateSecrets(c.ctx, expectedCluster, c.newClusterSpec).Return(nil),
)
}
func (c *upgradeTestSetup) expectEnsureEtcdCAPIComponentsExistTask(expectedCluster *types.Cluster) {
currentSpec := c.currentClusterSpec
gomock.InOrder(
c.capiManager.EXPECT().EnsureEtcdProvidersInstallation(c.ctx, expectedCluster, c.provider, currentSpec),
)
}
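// expectUpgradeCoreComponents sets ordered expectations for upgrading the core components
// (networking, CAPI providers, GitOps, the EKS-A components and EKS-D), each returning a change diff.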
func (c *upgradeTestSetup) expectUpgradeCoreComponents(managementCluster *types.Cluster, workloadCluster *types.Cluster) {
currentSpec := c.currentClusterSpec
networkingChangeDiff := types.NewChangeDiff(&types.ComponentChangeDiff{
ComponentName: "cilium",
OldVersion: "v0.0.1",
NewVersion: "v0.0.2",
})
capiChangeDiff := types.NewChangeDiff(&types.ComponentChangeDiff{
ComponentName: "vsphere",
OldVersion: "v0.0.1",
NewVersion: "v0.0.2",
})
fluxChangeDiff := types.NewChangeDiff(&types.ComponentChangeDiff{
ComponentName: "Flux",
OldVersion: "v0.0.1",
NewVersion: "v0.0.2",
})
eksaChangeDiff := types.NewChangeDiff(&types.ComponentChangeDiff{
ComponentName: "eks-a",
OldVersion: "v0.0.1",
NewVersion: "v0.0.2",
})
eksdChangeDiff := types.NewChangeDiff(&types.ComponentChangeDiff{
ComponentName: "eks-d",
OldVersion: "v0.0.1",
NewVersion: "v0.0.2",
})
gomock.InOrder(
c.clusterManager.EXPECT().UpgradeNetworking(c.ctx, workloadCluster, currentSpec, c.newClusterSpec, c.provider).Return(networkingChangeDiff, nil),
c.capiManager.EXPECT().Upgrade(c.ctx, managementCluster, c.provider, currentSpec, c.newClusterSpec).Return(capiChangeDiff, nil),
c.gitOpsManager.EXPECT().Install(c.ctx, managementCluster, currentSpec, c.newClusterSpec).Return(nil),
c.gitOpsManager.EXPECT().Upgrade(c.ctx, managementCluster, currentSpec, c.newClusterSpec).Return(fluxChangeDiff, nil),
c.clusterManager.EXPECT().Upgrade(c.ctx, managementCluster, currentSpec, c.newClusterSpec).Return(eksaChangeDiff, nil),
c.eksdUpgrader.EXPECT().Upgrade(c.ctx, managementCluster, currentSpec, c.newClusterSpec).Return(eksdChangeDiff, nil),
)
}
func (c *upgradeTestSetup) expectForceCleanupBootstrap() {
c.bootstrapper.EXPECT().DeleteBootstrapCluster(c.ctx, gomock.Not(gomock.Nil()), gomock.Not(gomock.Nil()), true).Return(nil)
}
func (c *upgradeTestSetup) expectForceCleanupBootstrapError() {
c.bootstrapper.EXPECT().DeleteBootstrapCluster(c.ctx, gomock.Not(gomock.Nil()), gomock.Not(gomock.Nil()), true).Return(errors.New("test error"))
}
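// expectCreateBootstrap expects a bootstrap cluster to be created, the provider bootstrap hooks to
// run, and CAPI to be installed on it.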
func (c *upgradeTestSetup) expectCreateBootstrap() {
opts := []bootstrapper.BootstrapClusterOption{
bootstrapper.WithExtraDockerMounts(),
}
gomock.InOrder(
c.provider.EXPECT().BootstrapClusterOpts(c.newClusterSpec).Return(opts, nil),
c.bootstrapper.EXPECT().CreateBootstrapCluster(
c.ctx, gomock.Not(gomock.Nil()), gomock.Not(gomock.Nil()),
).Return(c.bootstrapCluster, nil),
c.provider.EXPECT().PreCAPIInstallOnBootstrap(c.ctx, c.bootstrapCluster, c.newClusterSpec),
c.provider.EXPECT().PostBootstrapSetupUpgrade(c.ctx, c.newClusterSpec.Cluster, c.bootstrapCluster),
c.clusterManager.EXPECT().InstallCAPI(c.ctx, gomock.Not(gomock.Nil()), c.bootstrapCluster, c.provider),
)
}
func (c *upgradeTestSetup) expectNotToCreateBootstrap() {
c.provider.EXPECT().BootstrapClusterOpts(c.newClusterSpec).Times(0)
c.bootstrapper.EXPECT().CreateBootstrapCluster(
c.ctx, gomock.Not(gomock.Nil()), gomock.Not(gomock.Nil()),
).Times(0)
c.clusterManager.EXPECT().InstallCAPI(c.ctx, gomock.Not(gomock.Nil()), c.bootstrapCluster, c.provider).Times(0)
}
func (c *upgradeTestSetup) expectWriteClusterConfig() {
gomock.InOrder(
c.provider.EXPECT().DatacenterConfig(c.newClusterSpec).Return(c.datacenterConfig),
c.provider.EXPECT().MachineConfigs(c.newClusterSpec).Return(c.machineConfigs),
c.writer.EXPECT().Write("cluster-name-eks-a-cluster.yaml", gomock.Any(), gomock.Any()),
)
}
func (c *upgradeTestSetup) expectDeleteBootstrap() {
gomock.InOrder(
c.bootstrapper.EXPECT().DeleteBootstrapCluster(
c.ctx, c.bootstrapCluster,
gomock.Any(), gomock.Any()).Return(nil),
)
}
func (c *upgradeTestSetup) expectNotToDeleteBootstrap() {
c.bootstrapper.EXPECT().DeleteBootstrapCluster(c.ctx, c.bootstrapCluster, gomock.Any(), gomock.Any()).Times(0)
}
func (c *upgradeTestSetup) expectUpgradeWorkload(managementCluster *types.Cluster, workloadCluster *types.Cluster) {
calls := []*gomock.Call{
c.expectPrepareUpgradeWorkload(managementCluster, workloadCluster),
c.expectUpgradeWorkloadToReturn(managementCluster, workloadCluster, nil),
c.clusterUpgrader.EXPECT().CleanupAfterUpgrade(c.ctx,
c.newClusterSpec,
managementCluster.KubeconfigFile, //nolint
workloadCluster.KubeconfigFile,
),
}
if managementCluster != nil && managementCluster.ExistingManagement {
calls = append(calls, c.clusterManager.EXPECT().ApplyBundles(c.ctx, c.newClusterSpec, managementCluster))
} else {
calls = append(calls, c.clusterManager.EXPECT().ApplyBundles(c.ctx, c.newClusterSpec, workloadCluster))
}
gomock.InOrder(calls...)
}
func (c *upgradeTestSetup) expectUpgradeWorkloadToReturn(managementCluster *types.Cluster, workloadCluster *types.Cluster, err error) *gomock.Call {
return c.clusterManager.EXPECT().UpgradeCluster(
c.ctx, managementCluster, workloadCluster, c.newClusterSpec, c.provider,
).Return(err)
}
func (c *upgradeTestSetup) expectPrepareUpgradeWorkload(managementCluster *types.Cluster, workloadCluster *types.Cluster) *gomock.Call {
return c.clusterUpgrader.EXPECT().PrepareUpgrade(c.ctx,
c.newClusterSpec,
managementCluster.KubeconfigFile,
workloadCluster.KubeconfigFile,
)
}
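// expectMoveManagementToBootstrap expects the CAPI state to be backed up, CAPI workload clusters to
// be paused, and the CAPI objects to be moved from the management cluster to the bootstrap cluster.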
func (c *upgradeTestSetup) expectMoveManagementToBootstrap() {
gomock.InOrder(
c.clusterManager.EXPECT().BackupCAPI(c.ctx, c.managementCluster, c.managementStatePath),
c.clusterManager.EXPECT().PauseCAPIWorkloadClusters(c.ctx, c.managementCluster),
c.clusterManager.EXPECT().MoveCAPI(
c.ctx, c.managementCluster, c.bootstrapCluster, gomock.Any(), c.newClusterSpec, gomock.Any(),
),
c.provider.EXPECT().PostMoveManagementToBootstrap(
c.ctx, c.bootstrapCluster,
),
)
}
func (c *upgradeTestSetup) expectBackupManagementToBootstrapFailed() {
gomock.InOrder(
c.clusterManager.EXPECT().BackupCAPI(c.ctx, c.managementCluster, c.managementStatePath).Return(fmt.Errorf("backup management failed")),
)
}
func (c *upgradeTestSetup) expectNotToMoveManagementToBootstrap() {
c.clusterManager.EXPECT().MoveCAPI(c.ctx, c.managementCluster, c.bootstrapCluster, gomock.Any(), c.newClusterSpec, gomock.Any()).Times(0)
}
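// expectMoveManagementToWorkload expects the CAPI objects to be moved back from the bootstrap
// cluster to the upgraded cluster and the paused CAPI workload clusters to be resumed.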
func (c *upgradeTestSetup) expectMoveManagementToWorkload() {
gomock.InOrder(
c.clusterManager.EXPECT().MoveCAPI(
c.ctx, c.bootstrapCluster, c.managementCluster, gomock.Any(), c.newClusterSpec, gomock.Any(),
),
c.clusterManager.EXPECT().ResumeCAPIWorkloadClusters(c.ctx, c.managementCluster),
)
}
func (c *upgradeTestSetup) expectNotToMoveManagementToWorkload() {
c.clusterManager.EXPECT().MoveCAPI(c.ctx, c.bootstrapCluster, c.managementCluster, gomock.Any(), c.newClusterSpec, gomock.Any()).Times(0)
}
func (c *upgradeTestSetup) expectPauseEKSAControllerReconcile(expectedCluster *types.Cluster) {
gomock.InOrder(
c.clusterManager.EXPECT().PauseEKSAControllerReconcile(
c.ctx, expectedCluster, c.currentClusterSpec, c.provider,
),
)
}
func (c *upgradeTestSetup) expectResumeEKSAControllerReconcile(expectedCluster *types.Cluster) {
gomock.InOrder(
c.clusterManager.EXPECT().ResumeEKSAControllerReconcile(
c.ctx, expectedCluster, c.newClusterSpec, c.provider,
),
)
}
func (c *upgradeTestSetup) expectPauseGitOpsReconcile(expectedCluster *types.Cluster) {
gomock.InOrder(
c.gitOpsManager.EXPECT().PauseClusterResourcesReconcile(
c.ctx, expectedCluster, c.newClusterSpec, c.provider,
),
)
}
func (c *upgradeTestSetup) expectDatacenterConfig() {
gomock.InOrder(
c.provider.EXPECT().DatacenterConfig(c.newClusterSpec).Return(c.datacenterConfig).AnyTimes(),
)
}
func (c *upgradeTestSetup) expectMachineConfigs() {
gomock.InOrder(
c.provider.EXPECT().MachineConfigs(c.newClusterSpec).Return(c.machineConfigs).AnyTimes(),
)
}
func (c *upgradeTestSetup) expectCreateEKSAResources(expectedCluster *types.Cluster) {
gomock.InOrder(
c.clusterManager.EXPECT().CreateEKSAResources(
c.ctx, expectedCluster, c.newClusterSpec, c.datacenterConfig, c.machineConfigs,
),
)
}
func (c *upgradeTestSetup) expectInstallEksdManifest(expectedCluster *types.Cluster) {
gomock.InOrder(
c.eksdInstaller.EXPECT().InstallEksdManifest(
			c.ctx, c.newClusterSpec, expectedCluster,
),
)
}
func (c *upgradeTestSetup) expectUpdateGitEksaSpec() {
gomock.InOrder(
c.gitOpsManager.EXPECT().UpdateGitEksaSpec(
c.ctx, c.newClusterSpec, c.datacenterConfig, c.machineConfigs,
),
)
}
func (c *upgradeTestSetup) expectForceReconcileGitRepo(expectedCluster *types.Cluster) {
gomock.InOrder(
c.gitOpsManager.EXPECT().ForceReconcileGitRepo(
c.ctx, expectedCluster, c.newClusterSpec,
),
)
}
func (c *upgradeTestSetup) expectResumeGitOpsReconcile(expectedCluster *types.Cluster) {
gomock.InOrder(
c.gitOpsManager.EXPECT().ResumeClusterResourcesReconcile(
c.ctx, expectedCluster, c.newClusterSpec, c.provider,
),
)
}
func (c *upgradeTestSetup) expectPostBootstrapDeleteForUpgrade() {
gomock.InOrder(
c.provider.EXPECT().PostBootstrapDeleteForUpgrade(c.ctx),
)
}
func (c *upgradeTestSetup) expectVerifyClusterSpecChanged(expectedCluster *types.Cluster) {
gomock.InOrder(
c.clusterManager.EXPECT().EKSAClusterSpecChanged(c.ctx, expectedCluster, c.newClusterSpec).Return(true, nil),
)
}
func (c *upgradeTestSetup) expectSaveLogs(expectedWorkloadCluster *types.Cluster) {
gomock.InOrder(
c.clusterManager.EXPECT().SaveLogsManagementCluster(c.ctx, c.newClusterSpec, c.bootstrapCluster).Return(nil),
c.clusterManager.EXPECT().SaveLogsWorkloadCluster(c.ctx, c.provider, c.newClusterSpec, expectedWorkloadCluster),
)
}
func (c *upgradeTestSetup) expectWriteCheckpointFile() {
gomock.InOrder(
c.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", c.newClusterSpec.Cluster.Name), gomock.Any()),
)
}
func (c *upgradeTestSetup) expectPreCoreComponentsUpgrade() {
c.provider.EXPECT().PreCoreComponentsUpgrade(gomock.Any(), gomock.Any(), gomock.Any())
}
func (c *upgradeTestSetup) run() error {
return c.workflow.Run(c.ctx, c.newClusterSpec, c.managementCluster, c.workloadCluster, c.validator, c.forceCleanup)
}
func (c *upgradeTestSetup) expectProviderNoUpgradeNeeded(expectedCluster *types.Cluster) {
c.provider.EXPECT().UpgradeNeeded(c.ctx, c.newClusterSpec, c.currentClusterSpec, expectedCluster).Return(false, nil)
}
func (c *upgradeTestSetup) expectProviderUpgradeNeeded() {
c.provider.EXPECT().UpgradeNeeded(c.ctx, c.newClusterSpec, c.currentClusterSpec, c.workloadCluster).Return(true, nil)
}
func (c *upgradeTestSetup) expectVerifyClusterSpecNoChanges() {
gomock.InOrder(
c.clusterManager.EXPECT().EKSAClusterSpecChanged(c.ctx, c.workloadCluster, c.newClusterSpec).Return(false, nil),
)
}
func (c *upgradeTestSetup) expectCreateBootstrapNotToBeCalled() {
c.provider.EXPECT().BootstrapClusterOpts(c.newClusterSpec).Times(0)
c.bootstrapper.EXPECT().CreateBootstrapCluster(c.ctx, gomock.Not(gomock.Nil()), gomock.Not(gomock.Nil())).Times(0)
c.clusterManager.EXPECT().InstallCAPI(c.ctx, gomock.Not(gomock.Nil()), c.bootstrapCluster, c.provider).Times(0)
}
func (c *upgradeTestSetup) expectPreflightValidationsToPass() {
c.validator.EXPECT().PreflightValidations(c.ctx).Return(nil)
}
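// TestSkipUpgradeRunSuccess verifies that when neither the provider nor the cluster spec reports any
// changes, the workflow skips the bootstrap cluster and the CAPI move entirely.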
func TestSkipUpgradeRunSuccess(t *testing.T) {
os.Unsetenv(features.CheckpointEnabledEnvVar)
test := newUpgradeSelfManagedClusterTest(t)
test.expectSetup()
test.expectPreflightValidationsToPass()
test.expectUpdateSecrets(test.workloadCluster)
test.expectEnsureEtcdCAPIComponentsExistTask(test.workloadCluster)
test.expectPauseEKSAControllerReconcile(test.workloadCluster)
test.expectPauseGitOpsReconcile(test.workloadCluster)
test.expectUpgradeCoreComponents(test.workloadCluster, test.workloadCluster)
test.expectProviderNoUpgradeNeeded(test.workloadCluster)
test.expectVerifyClusterSpecNoChanges()
test.expectDatacenterConfig()
test.expectMachineConfigs()
test.expectCreateEKSAResources(test.workloadCluster)
test.expectInstallEksdManifest(test.workloadCluster)
test.expectResumeEKSAControllerReconcile(test.workloadCluster)
test.expectUpdateGitEksaSpec()
test.expectForceReconcileGitRepo(test.workloadCluster)
test.expectResumeGitOpsReconcile(test.workloadCluster)
test.expectCreateBootstrapNotToBeCalled()
test.expectPreCoreComponentsUpgrade()
err := test.run()
if err != nil {
t.Fatalf("Upgrade.Run() err = %v, want err = nil", err)
}
}
func TestUpgradeRunSuccess(t *testing.T) {
os.Unsetenv(features.CheckpointEnabledEnvVar)
test := newUpgradeSelfManagedClusterTest(t)
test.expectSetup()
test.expectPreflightValidationsToPass()
test.expectUpdateSecrets(test.workloadCluster)
test.expectEnsureEtcdCAPIComponentsExistTask(test.workloadCluster)
test.expectUpgradeCoreComponents(test.workloadCluster, test.workloadCluster)
test.expectProviderNoUpgradeNeeded(test.workloadCluster)
test.expectVerifyClusterSpecChanged(test.workloadCluster)
test.expectPauseEKSAControllerReconcile(test.workloadCluster)
test.expectPauseGitOpsReconcile(test.workloadCluster)
test.expectCreateBootstrap()
test.expectMoveManagementToBootstrap()
test.expectUpgradeWorkload(test.bootstrapCluster, test.workloadCluster)
test.expectMoveManagementToWorkload()
test.expectWriteClusterConfig()
test.expectDeleteBootstrap()
test.expectDatacenterConfig()
test.expectMachineConfigs()
test.expectCreateEKSAResources(test.workloadCluster)
test.expectInstallEksdManifest(test.workloadCluster)
test.expectResumeEKSAControllerReconcile(test.workloadCluster)
test.expectUpdateGitEksaSpec()
test.expectForceReconcileGitRepo(test.workloadCluster)
test.expectResumeGitOpsReconcile(test.workloadCluster)
test.expectPostBootstrapDeleteForUpgrade()
test.expectPreCoreComponentsUpgrade()
err := test.run()
if err != nil {
t.Fatalf("Upgrade.Run() err = %v, want err = nil", err)
}
}
func TestUpgradeRunSuccessForceCleanup(t *testing.T) {
os.Unsetenv(features.CheckpointEnabledEnvVar)
test := newUpgradeSelfManagedClusterTest(t).WithForceCleanup()
test.expectSetup()
test.expectPreflightValidationsToPass()
test.expectUpdateSecrets(test.workloadCluster)
test.expectEnsureEtcdCAPIComponentsExistTask(test.workloadCluster)
test.expectUpgradeCoreComponents(test.workloadCluster, test.workloadCluster)
test.expectProviderNoUpgradeNeeded(test.workloadCluster)
test.expectVerifyClusterSpecChanged(test.workloadCluster)
test.expectPauseEKSAControllerReconcile(test.workloadCluster)
test.expectPauseGitOpsReconcile(test.workloadCluster)
test.expectForceCleanupBootstrap()
test.expectCreateBootstrap()
test.expectMoveManagementToBootstrap()
test.expectUpgradeWorkload(test.bootstrapCluster, test.workloadCluster)
test.expectMoveManagementToWorkload()
test.expectWriteClusterConfig()
test.expectDeleteBootstrap()
test.expectDatacenterConfig()
test.expectMachineConfigs()
test.expectCreateEKSAResources(test.workloadCluster)
test.expectInstallEksdManifest(test.workloadCluster)
test.expectResumeEKSAControllerReconcile(test.workloadCluster)
test.expectUpdateGitEksaSpec()
test.expectForceReconcileGitRepo(test.workloadCluster)
test.expectResumeGitOpsReconcile(test.workloadCluster)
test.expectPostBootstrapDeleteForUpgrade()
test.expectPreCoreComponentsUpgrade()
err := test.run()
if err != nil {
t.Fatalf("Upgrade.Run() err = %v, want err = nil", err)
}
}
func TestUpgradeRunProviderNeedsUpgradeSuccess(t *testing.T) {
os.Unsetenv(features.CheckpointEnabledEnvVar)
test := newUpgradeSelfManagedClusterTest(t)
test.expectSetup()
test.expectPreflightValidationsToPass()
test.expectUpdateSecrets(test.workloadCluster)
test.expectEnsureEtcdCAPIComponentsExistTask(test.workloadCluster)
test.expectUpgradeCoreComponents(test.workloadCluster, test.workloadCluster)
test.expectProviderUpgradeNeeded()
test.expectPauseEKSAControllerReconcile(test.workloadCluster)
test.expectPauseGitOpsReconcile(test.workloadCluster)
test.expectCreateBootstrap()
test.expectMoveManagementToBootstrap()
test.expectUpgradeWorkload(test.bootstrapCluster, test.workloadCluster)
test.expectMoveManagementToWorkload()
test.expectWriteClusterConfig()
test.expectDeleteBootstrap()
test.expectDatacenterConfig()
test.expectMachineConfigs()
test.expectCreateEKSAResources(test.workloadCluster)
test.expectInstallEksdManifest(test.workloadCluster)
test.expectResumeEKSAControllerReconcile(test.workloadCluster)
test.expectUpdateGitEksaSpec()
test.expectForceReconcileGitRepo(test.workloadCluster)
test.expectResumeGitOpsReconcile(test.workloadCluster)
test.expectPostBootstrapDeleteForUpgrade()
test.expectPreCoreComponentsUpgrade()
err := test.run()
if err != nil {
t.Fatalf("Upgrade.Run() err = %v, want err = nil", err)
}
}
func TestUpgradeWorkloadRunFailedForceCleanupBootstrap(t *testing.T) {
os.Unsetenv(features.CheckpointEnabledEnvVar)
test := newUpgradeSelfManagedClusterTest(t).WithForceCleanup()
test.expectSetup()
test.expectPreflightValidationsToPass()
test.expectUpdateSecrets(test.workloadCluster)
test.expectEnsureEtcdCAPIComponentsExistTask(test.workloadCluster)
test.expectUpgradeCoreComponents(test.workloadCluster, test.workloadCluster)
test.expectProviderNoUpgradeNeeded(test.workloadCluster)
test.expectVerifyClusterSpecChanged(test.workloadCluster)
test.expectPauseEKSAControllerReconcile(test.workloadCluster)
test.expectPauseGitOpsReconcile(test.workloadCluster)
test.expectForceCleanupBootstrapError()
test.expectPreCoreComponentsUpgrade()
test.expectWriteCheckpointFile()
err := test.run()
if err == nil {
t.Fatal("Upgrade.Run() err = nil, want err not nil")
}
}
func TestUpgradeRunFailedUpgrade(t *testing.T) {
os.Unsetenv(features.CheckpointEnabledEnvVar)
test := newUpgradeSelfManagedClusterTest(t)
test.expectSetup()
test.expectPreflightValidationsToPass()
test.expectUpdateSecrets(test.workloadCluster)
test.expectEnsureEtcdCAPIComponentsExistTask(test.workloadCluster)
test.expectUpgradeCoreComponents(test.workloadCluster, test.workloadCluster)
test.expectProviderNoUpgradeNeeded(test.workloadCluster)
test.expectVerifyClusterSpecChanged(test.workloadCluster)
test.expectPauseEKSAControllerReconcile(test.workloadCluster)
test.expectPauseGitOpsReconcile(test.workloadCluster)
test.expectCreateBootstrap()
test.expectMoveManagementToBootstrap()
test.expectPrepareUpgradeWorkload(test.bootstrapCluster, test.workloadCluster)
test.expectUpgradeWorkloadToReturn(test.bootstrapCluster, test.workloadCluster, errors.New("failed upgrading"))
test.expectSaveLogs(test.workloadCluster)
test.expectWriteCheckpointFile()
test.expectPreCoreComponentsUpgrade()
err := test.run()
if err == nil {
t.Fatal("Upgrade.Run() err = nil, want err not nil")
}
}
func TestUpgradeRunFailedBackupManagementUpgrade(t *testing.T) {
os.Unsetenv(features.CheckpointEnabledEnvVar)
test := newUpgradeSelfManagedClusterTest(t)
test.expectSetup()
test.expectPreflightValidationsToPass()
test.expectUpdateSecrets(test.workloadCluster)
test.expectEnsureEtcdCAPIComponentsExistTask(test.workloadCluster)
test.expectUpgradeCoreComponents(test.workloadCluster, test.workloadCluster)
test.expectProviderNoUpgradeNeeded(test.workloadCluster)
test.expectVerifyClusterSpecChanged(test.workloadCluster)
test.expectPauseEKSAControllerReconcile(test.workloadCluster)
test.expectPauseGitOpsReconcile(test.workloadCluster)
test.expectCreateBootstrap()
test.expectBackupManagementToBootstrapFailed()
test.expectSaveLogs(test.workloadCluster)
test.expectWriteCheckpointFile()
test.expectPreCoreComponentsUpgrade()
err := test.run()
if err == nil {
t.Fatal("Upgrade.Run() err = nil, want err not nil")
}
}
func TestUpgradeWorkloadRunSuccess(t *testing.T) {
os.Unsetenv(features.CheckpointEnabledEnvVar)
test := newUpgradeManagedClusterTest(t)
test.expectSetup()
test.expectPreflightValidationsToPass()
test.expectUpdateSecrets(test.managementCluster)
test.expectEnsureEtcdCAPIComponentsExistTask(test.managementCluster)
test.expectUpgradeCoreComponents(test.managementCluster, test.workloadCluster)
test.expectProviderNoUpgradeNeeded(test.managementCluster)
test.expectVerifyClusterSpecChanged(test.managementCluster)
test.expectPauseEKSAControllerReconcile(test.managementCluster)
test.expectPauseGitOpsReconcile(test.managementCluster)
test.expectNotToCreateBootstrap()
test.expectNotToMoveManagementToBootstrap()
test.expectNotToMoveManagementToWorkload()
test.expectWriteClusterConfig()
test.expectNotToDeleteBootstrap()
test.expectDatacenterConfig()
test.expectMachineConfigs()
test.expectCreateEKSAResources(test.managementCluster)
test.expectInstallEksdManifest(test.managementCluster)
test.expectResumeEKSAControllerReconcile(test.managementCluster)
test.expectUpdateGitEksaSpec()
test.expectForceReconcileGitRepo(test.managementCluster)
test.expectResumeGitOpsReconcile(test.managementCluster)
test.expectUpgradeWorkload(test.managementCluster, test.workloadCluster)
test.expectPreCoreComponentsUpgrade()
err := test.run()
if err != nil {
t.Fatalf("Upgrade.Run() err = %v, want err = nil", err)
}
}
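// TestUpgradeWithCheckpointFirstRunFailed exercises the checkpoint feature: the run fails during
// setup and a checkpoint file is written instead of completing the upgrade.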
func TestUpgradeWithCheckpointFirstRunFailed(t *testing.T) {
features.ClearCache()
t.Setenv(features.CheckpointEnabledEnvVar, "true")
test := newUpgradeSelfManagedClusterTest(t)
test.writer.EXPECT().TempDir()
test.expectSetupToFail()
test.expectWriteCheckpointFile()
err := test.run()
if err == nil {
t.Fatal("Upgrade.Run() err = nil, want err not nil")
}
}
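// TestUpgradeWithCheckpointSecondRunSuccess first fails the upgrade mid-way so a checkpoint is
// written, then re-runs from a checkpoint in testdata and expects only the remaining tasks to run.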
func TestUpgradeWithCheckpointSecondRunSuccess(t *testing.T) {
features.ClearCache()
t.Setenv(features.CheckpointEnabledEnvVar, "true")
test := newUpgradeSelfManagedClusterTest(t)
test.writer.EXPECT().TempDir()
test.expectSetup()
test.expectPreflightValidationsToPass()
test.expectUpdateSecrets(test.workloadCluster)
test.expectEnsureEtcdCAPIComponentsExistTask(test.workloadCluster)
test.expectUpgradeCoreComponents(test.workloadCluster, test.workloadCluster)
test.expectProviderNoUpgradeNeeded(test.workloadCluster)
test.expectVerifyClusterSpecChanged(test.workloadCluster)
test.expectPauseEKSAControllerReconcile(test.workloadCluster)
test.expectPauseGitOpsReconcile(test.workloadCluster)
test.expectCreateBootstrap()
test.expectMoveManagementToBootstrap()
test.expectPrepareUpgradeWorkload(test.bootstrapCluster, test.workloadCluster)
test.expectUpgradeWorkloadToReturn(test.bootstrapCluster, test.workloadCluster, errors.New("failed upgrading"))
test.expectSaveLogs(test.workloadCluster)
test.expectWriteCheckpointFile()
test.expectPreCoreComponentsUpgrade()
err := test.run()
if err == nil {
t.Fatal("Upgrade.Run() err = nil, want err not nil")
}
test2 := newUpgradeSelfManagedClusterTest(t)
test2.writer.EXPECT().TempDir().Return("testdata")
test2.expectSetup()
test2.expectUpgradeWorkload(test2.bootstrapCluster, test2.workloadCluster)
test2.expectMoveManagementToWorkload()
test2.expectWriteClusterConfig()
test2.expectDeleteBootstrap()
test2.expectDatacenterConfig()
test2.expectMachineConfigs()
test2.expectCreateEKSAResources(test2.workloadCluster)
test2.expectInstallEksdManifest(test2.workloadCluster)
test2.expectResumeEKSAControllerReconcile(test2.workloadCluster)
test2.expectUpdateGitEksaSpec()
test2.expectForceReconcileGitRepo(test2.workloadCluster)
test2.expectResumeGitOpsReconcile(test2.workloadCluster)
test2.expectPostBootstrapDeleteForUpgrade()
err = test2.run()
if err != nil {
t.Fatalf("Upgrade.Run() err = %v, want nil", err)
}
}
| 756 |
eks-anywhere | aws | Go | package interfaces
import (
"context"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
)
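// Bootstrapper creates and deletes the temporary bootstrap cluster used during cluster lifecycle
// operations.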
type Bootstrapper interface {
CreateBootstrapCluster(ctx context.Context, clusterSpec *cluster.Spec, opts ...bootstrapper.BootstrapClusterOption) (*types.Cluster, error)
DeleteBootstrapCluster(context.Context, *types.Cluster, constants.Operation, bool) error
}
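// ClusterManager manages the lifecycle of EKS-A clusters: creating, upgrading and deleting clusters,
// moving CAPI resources, installing components, and pausing/resuming reconciliation.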
type ClusterManager interface {
BackupCAPI(ctx context.Context, cluster *types.Cluster, managementStatePath string) error
MoveCAPI(ctx context.Context, from, to *types.Cluster, clusterName string, clusterSpec *cluster.Spec, checkers ...types.NodeReadyChecker) error
CreateWorkloadCluster(ctx context.Context, managementCluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) (*types.Cluster, error)
PauseCAPIWorkloadClusters(ctx context.Context, managementCluster *types.Cluster) error
ResumeCAPIWorkloadClusters(ctx context.Context, managementCluster *types.Cluster) error
RunPostCreateWorkloadCluster(ctx context.Context, managementCluster, workloadCluster *types.Cluster, clusterSpec *cluster.Spec) error
UpgradeCluster(ctx context.Context, managementCluster, workloadCluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error
DeleteCluster(ctx context.Context, managementCluster, clusterToDelete *types.Cluster, provider providers.Provider, clusterSpec *cluster.Spec) error
InstallCAPI(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster, provider providers.Provider) error
InstallNetworking(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error
UpgradeNetworking(ctx context.Context, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec, provider providers.Provider) (*types.ChangeDiff, error)
SaveLogsManagementCluster(ctx context.Context, spec *cluster.Spec, cluster *types.Cluster) error
SaveLogsWorkloadCluster(ctx context.Context, provider providers.Provider, spec *cluster.Spec, cluster *types.Cluster) error
InstallCustomComponents(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster, provider providers.Provider) error
CreateEKSANamespace(ctx context.Context, cluster *types.Cluster) error
CreateEKSAResources(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig) error
ApplyBundles(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster) error
PauseEKSAControllerReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error
ResumeEKSAControllerReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error
EKSAClusterSpecChanged(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) (bool, error)
InstallMachineHealthChecks(ctx context.Context, clusterSpec *cluster.Spec, workloadCluster *types.Cluster) error
GetCurrentClusterSpec(ctx context.Context, cluster *types.Cluster, clusterName string) (*cluster.Spec, error)
Upgrade(ctx context.Context, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec) (*types.ChangeDiff, error)
InstallAwsIamAuth(ctx context.Context, managementCluster, workloadCluster *types.Cluster, clusterSpec *cluster.Spec) error
CreateAwsIamAuthCaSecret(ctx context.Context, bootstrapCluster *types.Cluster, workloadClusterName string) error
DeletePackageResources(ctx context.Context, managementCluster *types.Cluster, clusterName string) error
}
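// GitOpsManager handles the GitOps side of cluster management: installing and upgrading the GitOps
// components, keeping the EKS-A spec in the git repository up to date, and pausing/resuming cluster
// resource reconciliation.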
type GitOpsManager interface {
InstallGitOps(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig) error
PauseClusterResourcesReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error
ResumeClusterResourcesReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error
UpdateGitEksaSpec(ctx context.Context, clusterSpec *cluster.Spec, datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig) error
ForceReconcileGitRepo(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error
Validations(ctx context.Context, clusterSpec *cluster.Spec) []validations.Validation
CleanupGitRepo(ctx context.Context, clusterSpec *cluster.Spec) error
Install(ctx context.Context, cluster *types.Cluster, oldSpec, newSpec *cluster.Spec) error
Upgrade(ctx context.Context, cluster *types.Cluster, oldSpec, newSpec *cluster.Spec) (*types.ChangeDiff, error)
}
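// Validator runs the preflight validations required before a workflow can proceed.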
type Validator interface {
PreflightValidations(ctx context.Context) []validations.Validation
}
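// CAPIManager upgrades the Cluster API components and ensures the etcd providers are installed on
// the management cluster.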
type CAPIManager interface {
Upgrade(ctx context.Context, managementCluster *types.Cluster, provider providers.Provider, currentSpec, newSpec *cluster.Spec) (*types.ChangeDiff, error)
EnsureEtcdProvidersInstallation(ctx context.Context, managementCluster *types.Cluster, provider providers.Provider, currSpec *cluster.Spec) error
}
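// EksdInstaller installs the EKS Distro CRDs and manifest on a cluster.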
type EksdInstaller interface {
InstallEksdCRDs(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster) error
InstallEksdManifest(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster) error
}
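// EksdUpgrader upgrades the EKS Distro components and reports the resulting change diff.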
type EksdUpgrader interface {
Upgrade(ctx context.Context, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec) (*types.ChangeDiff, error)
}
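// PackageInstaller installs the EKS Anywhere curated packages.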
type PackageInstaller interface {
InstallCuratedPackages(ctx context.Context)
}
// ClusterUpgrader prepares the cluster for an upgrade.
type ClusterUpgrader interface {
PrepareUpgrade(ctx context.Context, spec *cluster.Spec, managementClusterKubeconfigPath, workloadClusterKubeconfigPath string) error
CleanupAfterUpgrade(ctx context.Context, spec *cluster.Spec, managementClusterKubeconfigPath, workloadClusterKubeconfigPath string) error
}
| 87 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/workflows/interfaces (interfaces: Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
bootstrapper "github.com/aws/eks-anywhere/pkg/bootstrapper"
cluster "github.com/aws/eks-anywhere/pkg/cluster"
constants "github.com/aws/eks-anywhere/pkg/constants"
providers "github.com/aws/eks-anywhere/pkg/providers"
types "github.com/aws/eks-anywhere/pkg/types"
validations "github.com/aws/eks-anywhere/pkg/validations"
gomock "github.com/golang/mock/gomock"
)
// MockBootstrapper is a mock of Bootstrapper interface.
type MockBootstrapper struct {
ctrl *gomock.Controller
recorder *MockBootstrapperMockRecorder
}
// MockBootstrapperMockRecorder is the mock recorder for MockBootstrapper.
type MockBootstrapperMockRecorder struct {
mock *MockBootstrapper
}
// NewMockBootstrapper creates a new mock instance.
func NewMockBootstrapper(ctrl *gomock.Controller) *MockBootstrapper {
mock := &MockBootstrapper{ctrl: ctrl}
mock.recorder = &MockBootstrapperMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockBootstrapper) EXPECT() *MockBootstrapperMockRecorder {
return m.recorder
}
// CreateBootstrapCluster mocks base method.
func (m *MockBootstrapper) CreateBootstrapCluster(arg0 context.Context, arg1 *cluster.Spec, arg2 ...bootstrapper.BootstrapClusterOption) (*types.Cluster, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "CreateBootstrapCluster", varargs...)
ret0, _ := ret[0].(*types.Cluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateBootstrapCluster indicates an expected call of CreateBootstrapCluster.
func (mr *MockBootstrapperMockRecorder) CreateBootstrapCluster(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBootstrapCluster", reflect.TypeOf((*MockBootstrapper)(nil).CreateBootstrapCluster), varargs...)
}
// DeleteBootstrapCluster mocks base method.
func (m *MockBootstrapper) DeleteBootstrapCluster(arg0 context.Context, arg1 *types.Cluster, arg2 constants.Operation, arg3 bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteBootstrapCluster", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteBootstrapCluster indicates an expected call of DeleteBootstrapCluster.
func (mr *MockBootstrapperMockRecorder) DeleteBootstrapCluster(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBootstrapCluster", reflect.TypeOf((*MockBootstrapper)(nil).DeleteBootstrapCluster), arg0, arg1, arg2, arg3)
}
// MockClusterManager is a mock of ClusterManager interface.
type MockClusterManager struct {
ctrl *gomock.Controller
recorder *MockClusterManagerMockRecorder
}
// MockClusterManagerMockRecorder is the mock recorder for MockClusterManager.
type MockClusterManagerMockRecorder struct {
mock *MockClusterManager
}
// NewMockClusterManager creates a new mock instance.
func NewMockClusterManager(ctrl *gomock.Controller) *MockClusterManager {
mock := &MockClusterManager{ctrl: ctrl}
mock.recorder = &MockClusterManagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClusterManager) EXPECT() *MockClusterManagerMockRecorder {
return m.recorder
}
// ApplyBundles mocks base method.
func (m *MockClusterManager) ApplyBundles(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyBundles", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyBundles indicates an expected call of ApplyBundles.
func (mr *MockClusterManagerMockRecorder) ApplyBundles(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyBundles", reflect.TypeOf((*MockClusterManager)(nil).ApplyBundles), arg0, arg1, arg2)
}
// BackupCAPI mocks base method.
func (m *MockClusterManager) BackupCAPI(arg0 context.Context, arg1 *types.Cluster, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BackupCAPI", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// BackupCAPI indicates an expected call of BackupCAPI.
func (mr *MockClusterManagerMockRecorder) BackupCAPI(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BackupCAPI", reflect.TypeOf((*MockClusterManager)(nil).BackupCAPI), arg0, arg1, arg2)
}
// CreateAwsIamAuthCaSecret mocks base method.
func (m *MockClusterManager) CreateAwsIamAuthCaSecret(arg0 context.Context, arg1 *types.Cluster, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateAwsIamAuthCaSecret", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// CreateAwsIamAuthCaSecret indicates an expected call of CreateAwsIamAuthCaSecret.
func (mr *MockClusterManagerMockRecorder) CreateAwsIamAuthCaSecret(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAwsIamAuthCaSecret", reflect.TypeOf((*MockClusterManager)(nil).CreateAwsIamAuthCaSecret), arg0, arg1, arg2)
}
// CreateEKSANamespace mocks base method.
func (m *MockClusterManager) CreateEKSANamespace(arg0 context.Context, arg1 *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateEKSANamespace", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CreateEKSANamespace indicates an expected call of CreateEKSANamespace.
func (mr *MockClusterManagerMockRecorder) CreateEKSANamespace(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEKSANamespace", reflect.TypeOf((*MockClusterManager)(nil).CreateEKSANamespace), arg0, arg1)
}
// CreateEKSAResources mocks base method.
func (m *MockClusterManager) CreateEKSAResources(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec, arg3 providers.DatacenterConfig, arg4 []providers.MachineConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateEKSAResources", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// CreateEKSAResources indicates an expected call of CreateEKSAResources.
func (mr *MockClusterManagerMockRecorder) CreateEKSAResources(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEKSAResources", reflect.TypeOf((*MockClusterManager)(nil).CreateEKSAResources), arg0, arg1, arg2, arg3, arg4)
}
// CreateWorkloadCluster mocks base method.
func (m *MockClusterManager) CreateWorkloadCluster(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec, arg3 providers.Provider) (*types.Cluster, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateWorkloadCluster", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*types.Cluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateWorkloadCluster indicates an expected call of CreateWorkloadCluster.
func (mr *MockClusterManagerMockRecorder) CreateWorkloadCluster(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateWorkloadCluster", reflect.TypeOf((*MockClusterManager)(nil).CreateWorkloadCluster), arg0, arg1, arg2, arg3)
}
// DeleteCluster mocks base method.
func (m *MockClusterManager) DeleteCluster(arg0 context.Context, arg1, arg2 *types.Cluster, arg3 providers.Provider, arg4 *cluster.Spec) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteCluster", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteCluster indicates an expected call of DeleteCluster.
func (mr *MockClusterManagerMockRecorder) DeleteCluster(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCluster", reflect.TypeOf((*MockClusterManager)(nil).DeleteCluster), arg0, arg1, arg2, arg3, arg4)
}
// DeletePackageResources mocks base method.
func (m *MockClusterManager) DeletePackageResources(arg0 context.Context, arg1 *types.Cluster, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeletePackageResources", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// DeletePackageResources indicates an expected call of DeletePackageResources.
func (mr *MockClusterManagerMockRecorder) DeletePackageResources(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePackageResources", reflect.TypeOf((*MockClusterManager)(nil).DeletePackageResources), arg0, arg1, arg2)
}
// EKSAClusterSpecChanged mocks base method.
func (m *MockClusterManager) EKSAClusterSpecChanged(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EKSAClusterSpecChanged", arg0, arg1, arg2)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// EKSAClusterSpecChanged indicates an expected call of EKSAClusterSpecChanged.
func (mr *MockClusterManagerMockRecorder) EKSAClusterSpecChanged(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EKSAClusterSpecChanged", reflect.TypeOf((*MockClusterManager)(nil).EKSAClusterSpecChanged), arg0, arg1, arg2)
}
// GetCurrentClusterSpec mocks base method.
func (m *MockClusterManager) GetCurrentClusterSpec(arg0 context.Context, arg1 *types.Cluster, arg2 string) (*cluster.Spec, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetCurrentClusterSpec", arg0, arg1, arg2)
ret0, _ := ret[0].(*cluster.Spec)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetCurrentClusterSpec indicates an expected call of GetCurrentClusterSpec.
func (mr *MockClusterManagerMockRecorder) GetCurrentClusterSpec(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentClusterSpec", reflect.TypeOf((*MockClusterManager)(nil).GetCurrentClusterSpec), arg0, arg1, arg2)
}
// InstallAwsIamAuth mocks base method.
func (m *MockClusterManager) InstallAwsIamAuth(arg0 context.Context, arg1, arg2 *types.Cluster, arg3 *cluster.Spec) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallAwsIamAuth", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// InstallAwsIamAuth indicates an expected call of InstallAwsIamAuth.
func (mr *MockClusterManagerMockRecorder) InstallAwsIamAuth(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallAwsIamAuth", reflect.TypeOf((*MockClusterManager)(nil).InstallAwsIamAuth), arg0, arg1, arg2, arg3)
}
// InstallCAPI mocks base method.
func (m *MockClusterManager) InstallCAPI(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster, arg3 providers.Provider) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallCAPI", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// InstallCAPI indicates an expected call of InstallCAPI.
func (mr *MockClusterManagerMockRecorder) InstallCAPI(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallCAPI", reflect.TypeOf((*MockClusterManager)(nil).InstallCAPI), arg0, arg1, arg2, arg3)
}
// InstallCustomComponents mocks base method.
func (m *MockClusterManager) InstallCustomComponents(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster, arg3 providers.Provider) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallCustomComponents", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// InstallCustomComponents indicates an expected call of InstallCustomComponents.
func (mr *MockClusterManagerMockRecorder) InstallCustomComponents(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallCustomComponents", reflect.TypeOf((*MockClusterManager)(nil).InstallCustomComponents), arg0, arg1, arg2, arg3)
}
// InstallMachineHealthChecks mocks base method.
func (m *MockClusterManager) InstallMachineHealthChecks(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallMachineHealthChecks", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// InstallMachineHealthChecks indicates an expected call of InstallMachineHealthChecks.
func (mr *MockClusterManagerMockRecorder) InstallMachineHealthChecks(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallMachineHealthChecks", reflect.TypeOf((*MockClusterManager)(nil).InstallMachineHealthChecks), arg0, arg1, arg2)
}
// InstallNetworking mocks base method.
func (m *MockClusterManager) InstallNetworking(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec, arg3 providers.Provider) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallNetworking", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// InstallNetworking indicates an expected call of InstallNetworking.
func (mr *MockClusterManagerMockRecorder) InstallNetworking(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallNetworking", reflect.TypeOf((*MockClusterManager)(nil).InstallNetworking), arg0, arg1, arg2, arg3)
}
// MoveCAPI mocks base method.
func (m *MockClusterManager) MoveCAPI(arg0 context.Context, arg1, arg2 *types.Cluster, arg3 string, arg4 *cluster.Spec, arg5 ...types.NodeReadyChecker) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2, arg3, arg4}
for _, a := range arg5 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "MoveCAPI", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// MoveCAPI indicates an expected call of MoveCAPI.
func (mr *MockClusterManagerMockRecorder) MoveCAPI(arg0, arg1, arg2, arg3, arg4 interface{}, arg5 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2, arg3, arg4}, arg5...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveCAPI", reflect.TypeOf((*MockClusterManager)(nil).MoveCAPI), varargs...)
}
// PauseCAPIWorkloadClusters mocks base method.
func (m *MockClusterManager) PauseCAPIWorkloadClusters(arg0 context.Context, arg1 *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PauseCAPIWorkloadClusters", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// PauseCAPIWorkloadClusters indicates an expected call of PauseCAPIWorkloadClusters.
func (mr *MockClusterManagerMockRecorder) PauseCAPIWorkloadClusters(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseCAPIWorkloadClusters", reflect.TypeOf((*MockClusterManager)(nil).PauseCAPIWorkloadClusters), arg0, arg1)
}
// PauseEKSAControllerReconcile mocks base method.
func (m *MockClusterManager) PauseEKSAControllerReconcile(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec, arg3 providers.Provider) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PauseEKSAControllerReconcile", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// PauseEKSAControllerReconcile indicates an expected call of PauseEKSAControllerReconcile.
func (mr *MockClusterManagerMockRecorder) PauseEKSAControllerReconcile(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseEKSAControllerReconcile", reflect.TypeOf((*MockClusterManager)(nil).PauseEKSAControllerReconcile), arg0, arg1, arg2, arg3)
}
// ResumeCAPIWorkloadClusters mocks base method.
func (m *MockClusterManager) ResumeCAPIWorkloadClusters(arg0 context.Context, arg1 *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ResumeCAPIWorkloadClusters", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ResumeCAPIWorkloadClusters indicates an expected call of ResumeCAPIWorkloadClusters.
func (mr *MockClusterManagerMockRecorder) ResumeCAPIWorkloadClusters(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeCAPIWorkloadClusters", reflect.TypeOf((*MockClusterManager)(nil).ResumeCAPIWorkloadClusters), arg0, arg1)
}
// ResumeEKSAControllerReconcile mocks base method.
func (m *MockClusterManager) ResumeEKSAControllerReconcile(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec, arg3 providers.Provider) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ResumeEKSAControllerReconcile", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// ResumeEKSAControllerReconcile indicates an expected call of ResumeEKSAControllerReconcile.
func (mr *MockClusterManagerMockRecorder) ResumeEKSAControllerReconcile(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeEKSAControllerReconcile", reflect.TypeOf((*MockClusterManager)(nil).ResumeEKSAControllerReconcile), arg0, arg1, arg2, arg3)
}
// RunPostCreateWorkloadCluster mocks base method.
func (m *MockClusterManager) RunPostCreateWorkloadCluster(arg0 context.Context, arg1, arg2 *types.Cluster, arg3 *cluster.Spec) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RunPostCreateWorkloadCluster", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// RunPostCreateWorkloadCluster indicates an expected call of RunPostCreateWorkloadCluster.
func (mr *MockClusterManagerMockRecorder) RunPostCreateWorkloadCluster(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunPostCreateWorkloadCluster", reflect.TypeOf((*MockClusterManager)(nil).RunPostCreateWorkloadCluster), arg0, arg1, arg2, arg3)
}
// SaveLogsManagementCluster mocks base method.
func (m *MockClusterManager) SaveLogsManagementCluster(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SaveLogsManagementCluster", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// SaveLogsManagementCluster indicates an expected call of SaveLogsManagementCluster.
func (mr *MockClusterManagerMockRecorder) SaveLogsManagementCluster(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveLogsManagementCluster", reflect.TypeOf((*MockClusterManager)(nil).SaveLogsManagementCluster), arg0, arg1, arg2)
}
// SaveLogsWorkloadCluster mocks base method.
func (m *MockClusterManager) SaveLogsWorkloadCluster(arg0 context.Context, arg1 providers.Provider, arg2 *cluster.Spec, arg3 *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SaveLogsWorkloadCluster", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// SaveLogsWorkloadCluster indicates an expected call of SaveLogsWorkloadCluster.
func (mr *MockClusterManagerMockRecorder) SaveLogsWorkloadCluster(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveLogsWorkloadCluster", reflect.TypeOf((*MockClusterManager)(nil).SaveLogsWorkloadCluster), arg0, arg1, arg2, arg3)
}
// Upgrade mocks base method.
func (m *MockClusterManager) Upgrade(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 *cluster.Spec) (*types.ChangeDiff, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Upgrade", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*types.ChangeDiff)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Upgrade indicates an expected call of Upgrade.
func (mr *MockClusterManagerMockRecorder) Upgrade(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockClusterManager)(nil).Upgrade), arg0, arg1, arg2, arg3)
}
// UpgradeCluster mocks base method.
func (m *MockClusterManager) UpgradeCluster(arg0 context.Context, arg1, arg2 *types.Cluster, arg3 *cluster.Spec, arg4 providers.Provider) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpgradeCluster", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// UpgradeCluster indicates an expected call of UpgradeCluster.
func (mr *MockClusterManagerMockRecorder) UpgradeCluster(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeCluster", reflect.TypeOf((*MockClusterManager)(nil).UpgradeCluster), arg0, arg1, arg2, arg3, arg4)
}
// UpgradeNetworking mocks base method.
func (m *MockClusterManager) UpgradeNetworking(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 *cluster.Spec, arg4 providers.Provider) (*types.ChangeDiff, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpgradeNetworking", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(*types.ChangeDiff)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpgradeNetworking indicates an expected call of UpgradeNetworking.
func (mr *MockClusterManagerMockRecorder) UpgradeNetworking(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeNetworking", reflect.TypeOf((*MockClusterManager)(nil).UpgradeNetworking), arg0, arg1, arg2, arg3, arg4)
}
// MockGitOpsManager is a mock of GitOpsManager interface.
type MockGitOpsManager struct {
ctrl *gomock.Controller
recorder *MockGitOpsManagerMockRecorder
}
// MockGitOpsManagerMockRecorder is the mock recorder for MockGitOpsManager.
type MockGitOpsManagerMockRecorder struct {
mock *MockGitOpsManager
}
// NewMockGitOpsManager creates a new mock instance.
func NewMockGitOpsManager(ctrl *gomock.Controller) *MockGitOpsManager {
mock := &MockGitOpsManager{ctrl: ctrl}
mock.recorder = &MockGitOpsManagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGitOpsManager) EXPECT() *MockGitOpsManagerMockRecorder {
return m.recorder
}
// CleanupGitRepo mocks base method.
func (m *MockGitOpsManager) CleanupGitRepo(arg0 context.Context, arg1 *cluster.Spec) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CleanupGitRepo", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CleanupGitRepo indicates an expected call of CleanupGitRepo.
func (mr *MockGitOpsManagerMockRecorder) CleanupGitRepo(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupGitRepo", reflect.TypeOf((*MockGitOpsManager)(nil).CleanupGitRepo), arg0, arg1)
}
// ForceReconcileGitRepo mocks base method.
func (m *MockGitOpsManager) ForceReconcileGitRepo(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ForceReconcileGitRepo", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ForceReconcileGitRepo indicates an expected call of ForceReconcileGitRepo.
func (mr *MockGitOpsManagerMockRecorder) ForceReconcileGitRepo(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceReconcileGitRepo", reflect.TypeOf((*MockGitOpsManager)(nil).ForceReconcileGitRepo), arg0, arg1, arg2)
}
// Install mocks base method.
func (m *MockGitOpsManager) Install(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 *cluster.Spec) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Install", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// Install indicates an expected call of Install.
func (mr *MockGitOpsManagerMockRecorder) Install(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Install", reflect.TypeOf((*MockGitOpsManager)(nil).Install), arg0, arg1, arg2, arg3)
}
// InstallGitOps mocks base method.
func (m *MockGitOpsManager) InstallGitOps(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec, arg3 providers.DatacenterConfig, arg4 []providers.MachineConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallGitOps", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// InstallGitOps indicates an expected call of InstallGitOps.
func (mr *MockGitOpsManagerMockRecorder) InstallGitOps(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallGitOps", reflect.TypeOf((*MockGitOpsManager)(nil).InstallGitOps), arg0, arg1, arg2, arg3, arg4)
}
// PauseClusterResourcesReconcile mocks base method.
func (m *MockGitOpsManager) PauseClusterResourcesReconcile(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec, arg3 providers.Provider) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PauseClusterResourcesReconcile", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// PauseClusterResourcesReconcile indicates an expected call of PauseClusterResourcesReconcile.
func (mr *MockGitOpsManagerMockRecorder) PauseClusterResourcesReconcile(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseClusterResourcesReconcile", reflect.TypeOf((*MockGitOpsManager)(nil).PauseClusterResourcesReconcile), arg0, arg1, arg2, arg3)
}
// ResumeClusterResourcesReconcile mocks base method.
func (m *MockGitOpsManager) ResumeClusterResourcesReconcile(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec, arg3 providers.Provider) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ResumeClusterResourcesReconcile", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// ResumeClusterResourcesReconcile indicates an expected call of ResumeClusterResourcesReconcile.
func (mr *MockGitOpsManagerMockRecorder) ResumeClusterResourcesReconcile(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeClusterResourcesReconcile", reflect.TypeOf((*MockGitOpsManager)(nil).ResumeClusterResourcesReconcile), arg0, arg1, arg2, arg3)
}
// UpdateGitEksaSpec mocks base method.
func (m *MockGitOpsManager) UpdateGitEksaSpec(arg0 context.Context, arg1 *cluster.Spec, arg2 providers.DatacenterConfig, arg3 []providers.MachineConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateGitEksaSpec", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// UpdateGitEksaSpec indicates an expected call of UpdateGitEksaSpec.
func (mr *MockGitOpsManagerMockRecorder) UpdateGitEksaSpec(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGitEksaSpec", reflect.TypeOf((*MockGitOpsManager)(nil).UpdateGitEksaSpec), arg0, arg1, arg2, arg3)
}
// Upgrade mocks base method.
func (m *MockGitOpsManager) Upgrade(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 *cluster.Spec) (*types.ChangeDiff, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Upgrade", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*types.ChangeDiff)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Upgrade indicates an expected call of Upgrade.
func (mr *MockGitOpsManagerMockRecorder) Upgrade(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockGitOpsManager)(nil).Upgrade), arg0, arg1, arg2, arg3)
}
// Validations mocks base method.
func (m *MockGitOpsManager) Validations(arg0 context.Context, arg1 *cluster.Spec) []validations.Validation {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Validations", arg0, arg1)
ret0, _ := ret[0].([]validations.Validation)
return ret0
}
// Validations indicates an expected call of Validations.
func (mr *MockGitOpsManagerMockRecorder) Validations(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validations", reflect.TypeOf((*MockGitOpsManager)(nil).Validations), arg0, arg1)
}
// MockValidator is a mock of Validator interface.
type MockValidator struct {
ctrl *gomock.Controller
recorder *MockValidatorMockRecorder
}
// MockValidatorMockRecorder is the mock recorder for MockValidator.
type MockValidatorMockRecorder struct {
mock *MockValidator
}
// NewMockValidator creates a new mock instance.
func NewMockValidator(ctrl *gomock.Controller) *MockValidator {
mock := &MockValidator{ctrl: ctrl}
mock.recorder = &MockValidatorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockValidator) EXPECT() *MockValidatorMockRecorder {
return m.recorder
}
// PreflightValidations mocks base method.
func (m *MockValidator) PreflightValidations(arg0 context.Context) []validations.Validation {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PreflightValidations", arg0)
ret0, _ := ret[0].([]validations.Validation)
return ret0
}
// PreflightValidations indicates an expected call of PreflightValidations.
func (mr *MockValidatorMockRecorder) PreflightValidations(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PreflightValidations", reflect.TypeOf((*MockValidator)(nil).PreflightValidations), arg0)
}
// MockCAPIManager is a mock of CAPIManager interface.
type MockCAPIManager struct {
ctrl *gomock.Controller
recorder *MockCAPIManagerMockRecorder
}
// MockCAPIManagerMockRecorder is the mock recorder for MockCAPIManager.
type MockCAPIManagerMockRecorder struct {
mock *MockCAPIManager
}
// NewMockCAPIManager creates a new mock instance.
func NewMockCAPIManager(ctrl *gomock.Controller) *MockCAPIManager {
mock := &MockCAPIManager{ctrl: ctrl}
mock.recorder = &MockCAPIManagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCAPIManager) EXPECT() *MockCAPIManagerMockRecorder {
return m.recorder
}
// EnsureEtcdProvidersInstallation mocks base method.
func (m *MockCAPIManager) EnsureEtcdProvidersInstallation(arg0 context.Context, arg1 *types.Cluster, arg2 providers.Provider, arg3 *cluster.Spec) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EnsureEtcdProvidersInstallation", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// EnsureEtcdProvidersInstallation indicates an expected call of EnsureEtcdProvidersInstallation.
func (mr *MockCAPIManagerMockRecorder) EnsureEtcdProvidersInstallation(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureEtcdProvidersInstallation", reflect.TypeOf((*MockCAPIManager)(nil).EnsureEtcdProvidersInstallation), arg0, arg1, arg2, arg3)
}
// Upgrade mocks base method.
func (m *MockCAPIManager) Upgrade(arg0 context.Context, arg1 *types.Cluster, arg2 providers.Provider, arg3, arg4 *cluster.Spec) (*types.ChangeDiff, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Upgrade", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(*types.ChangeDiff)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Upgrade indicates an expected call of Upgrade.
func (mr *MockCAPIManagerMockRecorder) Upgrade(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockCAPIManager)(nil).Upgrade), arg0, arg1, arg2, arg3, arg4)
}
// MockEksdInstaller is a mock of EksdInstaller interface.
type MockEksdInstaller struct {
ctrl *gomock.Controller
recorder *MockEksdInstallerMockRecorder
}
// MockEksdInstallerMockRecorder is the mock recorder for MockEksdInstaller.
type MockEksdInstallerMockRecorder struct {
mock *MockEksdInstaller
}
// NewMockEksdInstaller creates a new mock instance.
func NewMockEksdInstaller(ctrl *gomock.Controller) *MockEksdInstaller {
mock := &MockEksdInstaller{ctrl: ctrl}
mock.recorder = &MockEksdInstallerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockEksdInstaller) EXPECT() *MockEksdInstallerMockRecorder {
return m.recorder
}
// InstallEksdCRDs mocks base method.
func (m *MockEksdInstaller) InstallEksdCRDs(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallEksdCRDs", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// InstallEksdCRDs indicates an expected call of InstallEksdCRDs.
func (mr *MockEksdInstallerMockRecorder) InstallEksdCRDs(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallEksdCRDs", reflect.TypeOf((*MockEksdInstaller)(nil).InstallEksdCRDs), arg0, arg1, arg2)
}
// InstallEksdManifest mocks base method.
func (m *MockEksdInstaller) InstallEksdManifest(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallEksdManifest", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// InstallEksdManifest indicates an expected call of InstallEksdManifest.
func (mr *MockEksdInstallerMockRecorder) InstallEksdManifest(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallEksdManifest", reflect.TypeOf((*MockEksdInstaller)(nil).InstallEksdManifest), arg0, arg1, arg2)
}
// MockEksdUpgrader is a mock of EksdUpgrader interface.
type MockEksdUpgrader struct {
ctrl *gomock.Controller
recorder *MockEksdUpgraderMockRecorder
}
// MockEksdUpgraderMockRecorder is the mock recorder for MockEksdUpgrader.
type MockEksdUpgraderMockRecorder struct {
mock *MockEksdUpgrader
}
// NewMockEksdUpgrader creates a new mock instance.
func NewMockEksdUpgrader(ctrl *gomock.Controller) *MockEksdUpgrader {
mock := &MockEksdUpgrader{ctrl: ctrl}
mock.recorder = &MockEksdUpgraderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockEksdUpgrader) EXPECT() *MockEksdUpgraderMockRecorder {
return m.recorder
}
// Upgrade mocks base method.
func (m *MockEksdUpgrader) Upgrade(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 *cluster.Spec) (*types.ChangeDiff, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Upgrade", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*types.ChangeDiff)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Upgrade indicates an expected call of Upgrade.
func (mr *MockEksdUpgraderMockRecorder) Upgrade(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockEksdUpgrader)(nil).Upgrade), arg0, arg1, arg2, arg3)
}
// MockPackageInstaller is a mock of PackageInstaller interface.
type MockPackageInstaller struct {
ctrl *gomock.Controller
recorder *MockPackageInstallerMockRecorder
}
// MockPackageInstallerMockRecorder is the mock recorder for MockPackageInstaller.
type MockPackageInstallerMockRecorder struct {
mock *MockPackageInstaller
}
// NewMockPackageInstaller creates a new mock instance.
func NewMockPackageInstaller(ctrl *gomock.Controller) *MockPackageInstaller {
mock := &MockPackageInstaller{ctrl: ctrl}
mock.recorder = &MockPackageInstallerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockPackageInstaller) EXPECT() *MockPackageInstallerMockRecorder {
return m.recorder
}
// InstallCuratedPackages mocks base method.
func (m *MockPackageInstaller) InstallCuratedPackages(arg0 context.Context) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "InstallCuratedPackages", arg0)
}
// InstallCuratedPackages indicates an expected call of InstallCuratedPackages.
func (mr *MockPackageInstallerMockRecorder) InstallCuratedPackages(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallCuratedPackages", reflect.TypeOf((*MockPackageInstaller)(nil).InstallCuratedPackages), arg0)
}
// MockClusterUpgrader is a mock of ClusterUpgrader interface.
type MockClusterUpgrader struct {
ctrl *gomock.Controller
recorder *MockClusterUpgraderMockRecorder
}
// MockClusterUpgraderMockRecorder is the mock recorder for MockClusterUpgrader.
type MockClusterUpgraderMockRecorder struct {
mock *MockClusterUpgrader
}
// NewMockClusterUpgrader creates a new mock instance.
func NewMockClusterUpgrader(ctrl *gomock.Controller) *MockClusterUpgrader {
mock := &MockClusterUpgrader{ctrl: ctrl}
mock.recorder = &MockClusterUpgraderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClusterUpgrader) EXPECT() *MockClusterUpgraderMockRecorder {
return m.recorder
}
// CleanupAfterUpgrade mocks base method.
func (m *MockClusterUpgrader) CleanupAfterUpgrade(arg0 context.Context, arg1 *cluster.Spec, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CleanupAfterUpgrade", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// CleanupAfterUpgrade indicates an expected call of CleanupAfterUpgrade.
func (mr *MockClusterUpgraderMockRecorder) CleanupAfterUpgrade(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupAfterUpgrade", reflect.TypeOf((*MockClusterUpgrader)(nil).CleanupAfterUpgrade), arg0, arg1, arg2, arg3)
}
// PrepareUpgrade mocks base method.
func (m *MockClusterUpgrader) PrepareUpgrade(arg0 context.Context, arg1 *cluster.Spec, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PrepareUpgrade", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// PrepareUpgrade indicates an expected call of PrepareUpgrade.
func (mr *MockClusterUpgraderMockRecorder) PrepareUpgrade(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareUpgrade", reflect.TypeOf((*MockClusterUpgrader)(nil).PrepareUpgrade), arg0, arg1, arg2, arg3)
}
| 887 |
eks-anywhere | aws | Go | package yamlutil
import (
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// APIObject represents a kubernetes API object.
type APIObject interface {
runtime.Object
GetName() string
}
// ObjectLookup allows searching for APIObjects by a unique key composed of apiVersion, kind, and name.
type ObjectLookup map[string]APIObject
// GetFromRef searches an ObjectLookup for the APIObject referenced by a corev1.ObjectReference.
func (o ObjectLookup) GetFromRef(ref corev1.ObjectReference) APIObject {
return o[keyForRef(ref)]
}
func (o ObjectLookup) add(obj APIObject) {
o[keyForObject(obj)] = obj
}
// NewObjectLookupBuilder returns a new, empty ObjectLookupBuilder.
func NewObjectLookupBuilder() *ObjectLookupBuilder {
return &ObjectLookupBuilder{
lookup: ObjectLookup{},
}
}
// ObjectLookupBuilder allows constructing an ObjectLookup by accumulating APIObjects.
type ObjectLookupBuilder struct {
lookup ObjectLookup
}
// Add accumulates API objects that will be included in the built ObjectLookup.
func (o *ObjectLookupBuilder) Add(objs ...APIObject) *ObjectLookupBuilder {
for _, obj := range objs {
o.lookup.add(obj)
}
return o
}
// Build constructs and returns an ObjectLookup.
// After this method is called, the builder is reset and loses track
// of all previously added objects.
func (o *ObjectLookupBuilder) Build() ObjectLookup {
l := o.lookup
o.lookup = ObjectLookup{}
return l
}
func keyForRef(ref corev1.ObjectReference) string {
return key(ref.APIVersion, ref.Kind, ref.Name)
}
func key(apiVersion, kind, name string) string {
// this assumes we don't allow objects with the same apiVersion, kind, and name in multiple namespaces
return fmt.Sprintf("%s%s%s", apiVersion, kind, name)
}
func keyForObject(o APIObject) string {
return key(o.GetObjectKind().GroupVersionKind().GroupVersion().String(), o.GetObjectKind().GroupVersionKind().Kind, o.GetName())
}
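// Illustrative usage sketch (not part of the original file; the Secret and the
// names below are placeholders):
//
//	secret := &corev1.Secret{}
//	secret.APIVersion = "v1"
//	secret.Kind = "Secret"
//	secret.Name = "my-secret"
//
//	lookup := NewObjectLookupBuilder().Add(secret).Build()
//	obj := lookup.GetFromRef(corev1.ObjectReference{
//		APIVersion: "v1",
//		Kind:       "Secret",
//		Name:       "my-secret",
//	})
//	// obj is the *corev1.Secret added above; the key ignores namespaces.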
| 68 |
eks-anywhere | aws | Go | package yamlutil_test
import (
"testing"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
func TestObjectLookupGetFromRef(t *testing.T) {
g := NewWithT(t)
want := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "eksa-system",
Name: "my-secret",
},
Data: map[string][]byte{
"username": []byte("test"),
"password": []byte("test"),
},
}
objRef := corev1.ObjectReference{
Kind: want.Kind,
APIVersion: want.APIVersion,
Name: want.Name,
Namespace: want.Namespace,
}
otherSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "eksa-system",
Name: "my-other-secret",
},
}
o := yamlutil.NewObjectLookupBuilder().Add(want, otherSecret).Build()
got := o.GetFromRef(objRef)
g.Expect(got).To(Equal(want))
}
| 44 |
eks-anywhere | aws | Go | package yamlutil
import (
"bufio"
"bytes"
"io"
"github.com/go-logr/logr"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiyaml "k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
)
type (
// APIObjectGenerator returns an implementor of the APIObject interface.
APIObjectGenerator func() APIObject
// ParsedProcessor fills the struct of type T with the parsed API objects in ObjectLookup.
ParsedProcessor[T any] func(*T, ObjectLookup)
// Parser parses yaml containing kubernetes-style objects and
// stores them in a type implementing Builder.
// It allows dynamically registering mappings between kinds and concrete types.
Parser struct {
apiObjectMapping map[string]APIObjectGenerator
generateObjAnyKind APIObjectGenerator
logger logr.Logger
}
)
// NewParser returns a Parser with no mappings registered.
func NewParser(logger logr.Logger) *Parser {
return &Parser{
apiObjectMapping: make(map[string]APIObjectGenerator),
logger: logger,
}
}
// RegisterMapping records the mapping between a kubernetes Kind and an API concrete type.
func (c *Parser) RegisterMapping(kind string, generator APIObjectGenerator) error {
if _, ok := c.apiObjectMapping[kind]; ok {
return errors.Errorf("mapping for api object %s already registered", kind)
}
c.apiObjectMapping[kind] = generator
return nil
}
// Mapping defines the mapping between a kubernetes Kind and a concrete API type T.
type Mapping[T APIObject] struct {
New func() T
Kind string
}
// NewMapping constructs a Mapping between the given kind and the API type generated by new.
func NewMapping[T APIObject](kind string, new func() T) Mapping[T] {
return Mapping[T]{
Kind: kind,
New: new,
}
}
// ToAPIObjectMapping is a helper to convert from other concrete types of Mapping
// to an APIObject Mapping.
// This is mostly to help pass Mappings to RegisterMappings.
func (m Mapping[T]) ToAPIObjectMapping() Mapping[APIObject] {
return Mapping[APIObject]{
Kind: m.Kind,
New: func() APIObject {
return m.New()
},
}
}
// RegisterMappings records a collection of mappings.
func (c *Parser) RegisterMappings(mappings ...Mapping[APIObject]) error {
for _, m := range mappings {
if err := c.RegisterMapping(m.Kind, m.New); err != nil {
return err
}
}
return nil
}
// RegisterMappingForAnyKind records an object generator that is used as a fallback
// when no specific APIObjectGenerator is registered for a particular kind.
func (c *Parser) RegisterMappingForAnyKind(generator APIObjectGenerator) {
c.generateObjAnyKind = generator
}
// Builder processes the parsed API objects contained in a lookup.
type Builder interface {
BuildFromParsed(ObjectLookup) error
}
// Parse reads yaml manifest content with the registered mappings and passes
// the result to the Builder for further processing.
func (p *Parser) Parse(yamlManifest []byte, b Builder) error {
return p.Read(bytes.NewReader(yamlManifest), b)
}
// Read reads yaml manifest content with the registered mappings and passes
// the result to the Builder for further processing.
func (p *Parser) Read(reader io.Reader, b Builder) error {
parsed, err := p.unmarshal(reader)
if err != nil {
return err
}
return p.buildConfigFromParsed(parsed, b)
}
type basicAPIObject struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
}
func (k *basicAPIObject) empty() bool {
return k.APIVersion == "" && k.Kind == ""
}
type parsed struct {
objects ObjectLookup
}
func (p *Parser) unmarshal(reader io.Reader) (*parsed, error) {
parsed := &parsed{
objects: ObjectLookup{},
}
yamlReader := apiyaml.NewYAMLReader(bufio.NewReader(reader))
for {
// Read one YAML document at a time, until io.EOF is returned
b, err := yamlReader.Read()
if err != nil {
if err == io.EOF {
break
}
return nil, errors.Wrap(err, "failed to read yaml")
}
if len(b) == 0 {
break
}
k := &basicAPIObject{}
if err = yaml.Unmarshal(b, k); err != nil {
return nil, errors.Wrap(err, "invalid yaml kubernetes object")
}
// Ignore empty objects.
// Empty objects are generated by oddities in manifest files, e.g. two --- separators in a row without a yaml doc in between.
if k.empty() {
continue
}
var obj APIObject
if generateApiObj, ok := p.apiObjectMapping[k.Kind]; ok {
obj = generateApiObj()
} else if p.generateObjAnyKind != nil {
obj = p.generateObjAnyKind()
} else {
p.logger.V(2).Info("Ignoring object in yaml of unknown type during parsing", "kind", k.Kind)
continue
}
if err := yaml.Unmarshal(b, obj); err != nil {
return nil, errors.Wrapf(err, "invalid yaml for %s", k.Kind)
}
parsed.objects.add(obj)
}
return parsed, nil
}
func (p *Parser) buildConfigFromParsed(parsed *parsed, b Builder) error {
return b.BuildFromParsed(parsed.objects)
}
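// Illustrative usage sketch (not part of the original file; assumes a corev1
// import, and logger, manifestBytes and builder are placeholders, where builder
// is any type implementing Builder):
//
//	parser := NewParser(logger)
//	err := parser.RegisterMappings(
//		NewMapping("Secret", func() APIObject { return &corev1.Secret{} }),
//		NewMapping("ConfigMap", func() APIObject { return &corev1.ConfigMap{} }),
//	)
//	if err != nil {
//		return err
//	}
//
//	// Kinds without a registered mapping are skipped unless a fallback
//	// generator was registered with RegisterMappingForAnyKind.
//	return parser.Parse(manifestBytes, builder)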
| 177 |
eks-anywhere | aws | Go | package yamlutil_test
import (
"errors"
"testing"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
type yamlHolder struct {
configMap *corev1.ConfigMap
secret *corev1.Secret
}
func (h *yamlHolder) BuildFromParsed(l yamlutil.ObjectLookup) error {
processConfigMap(h, l)
processSecret(h, l)
return nil
}
func processConfigMap(h *yamlHolder, lookup yamlutil.ObjectLookup) {
for _, obj := range lookup {
if obj.GetObjectKind().GroupVersionKind().Kind == "ConfigMap" {
h.configMap = obj.(*corev1.ConfigMap)
}
}
}
func processSecret(h *yamlHolder, lookup yamlutil.ObjectLookup) {
for _, obj := range lookup {
if obj.GetObjectKind().GroupVersionKind().Kind == "Secret" {
h.secret = obj.(*corev1.Secret)
}
}
}
func TestParserParse(t *testing.T) {
g := NewWithT(t)
yaml := `
apiVersion: v1
data:
Corefile: "d"
kind: ConfigMap
metadata:
name: aws-iam-authenticator
namespace: kube-system
uid: 4aa825d5-4334-4ce0-a754-0d3a3cceaefd
---
apiVersion: v1
kind: Secret
metadata:
name: aws-iam-authenticator
namespace: kube-system
data:
password: QWRtaW4=
username: YWRtaW4=
`
parser := yamlutil.NewParser(test.NewNullLogger())
g.Expect(
parser.RegisterMappings(
yamlutil.NewMapping("Secret", func() yamlutil.APIObject {
return &corev1.Secret{}
}),
yamlutil.NewMapping("ConfigMap", func() yamlutil.APIObject {
return &corev1.ConfigMap{}
}),
),
).To(Succeed())
holder := &yamlHolder{}
g.Expect(parser.Parse([]byte(yaml), holder)).To(Succeed())
g.Expect(holder).NotTo(BeNil())
g.Expect(holder.configMap.Data).To(HaveKeyWithValue("Corefile", "d"))
g.Expect(holder.secret.Data["username"]).To(Equal([]byte("admin")))
}
type reader struct {
read int
err error
}
func (r reader) Read(p []byte) (n int, err error) {
return r.read, r.err
}
func TestParserReadReaderError(t *testing.T) {
g := NewWithT(t)
parser := yamlutil.NewParser(test.NewNullLogger())
holder := &yamlHolder{}
r := reader{
err: errors.New("failed from fake reader"),
}
g.Expect(parser.Read(r, holder)).To(MatchError(ContainSubstring("failed from fake reader")))
}
func TestParserParseEmptyContent(t *testing.T) {
g := NewWithT(t)
parser := yamlutil.NewParser(test.NewNullLogger())
holder := &yamlHolder{}
g.Expect(parser.Parse([]byte("---"), holder)).NotTo(HaveOccurred())
}
func TestParserParseInvalidKubernetesYaml(t *testing.T) {
g := NewWithT(t)
parser := yamlutil.NewParser(test.NewNullLogger())
holder := &yamlHolder{}
g.Expect(parser.Parse([]byte("1}"), holder)).To(MatchError(ContainSubstring(
"invalid yaml kubernetes object",
)))
}
func TestParserParseInvalidRegisterObjectYaml(t *testing.T) {
g := NewWithT(t)
parser := yamlutil.NewParser(test.NewNullLogger())
holder := &yamlHolder{}
g.Expect(
parser.RegisterMappings(
yamlutil.NewMapping("Secret", func() yamlutil.APIObject {
return &corev1.Secret{}
}),
),
).To(Succeed())
g.Expect(parser.Parse([]byte("kind: Secret\ndata: 111"), holder)).To(MatchError(ContainSubstring(
"invalid yaml for Secret",
)))
}
func TestParserParseUnregisteredObject(t *testing.T) {
g := NewWithT(t)
parser := yamlutil.NewParser(test.NewNullLogger())
holder := &yamlHolder{}
g.Expect(parser.Parse([]byte("kind: Secret"), holder)).NotTo(HaveOccurred())
}
func TestParserRegisterMappingDuplicateError(t *testing.T) {
g := NewWithT(t)
parser := yamlutil.NewParser(test.NewNullLogger())
g.Expect(parser.RegisterMapping("Secret", func() yamlutil.APIObject {
return &corev1.Secret{}
})).To(Succeed())
g.Expect(parser.RegisterMapping("Secret", func() yamlutil.APIObject {
return &corev1.ConfigMap{}
})).To(MatchError(ContainSubstring("mapping for api object Secret already registered")))
}
func TestMappingToAPIObjectMapping(t *testing.T) {
g := NewWithT(t)
mapping := yamlutil.NewMapping("Secret", func() *corev1.Secret {
return &corev1.Secret{}
})
apiObjectMapping := mapping.ToAPIObjectMapping()
g.Expect(apiObjectMapping.Kind).To(Equal("Secret"))
secret := apiObjectMapping.New()
g.Expect(secret).To(BeAssignableToTypeOf(&corev1.Secret{}))
}
func TestParserRegisterMappingsError(t *testing.T) {
g := NewWithT(t)
parser := yamlutil.NewParser(test.NewNullLogger())
g.Expect(
parser.RegisterMappings(
yamlutil.NewMapping("Secret", func() yamlutil.APIObject {
return &corev1.Secret{}
}),
yamlutil.NewMapping("Secret", func() yamlutil.APIObject {
return &corev1.ConfigMap{}
}),
),
).To(MatchError(ContainSubstring("mapping for api object Secret already registered")))
}
| 179 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"github.com/aws/eks-anywhere/release/cmd"
)
func main() {
cmd.Execute()
}
| 24 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
// Manifests returns a map from component name to that component's manifest URIs.
func (vb *VersionsBundle) Manifests() map[string][]*string {
return map[string][]*string{
"core-cluster-api": {
&vb.ClusterAPI.Components.URI,
&vb.ClusterAPI.Metadata.URI,
},
"capi-kubeadm-bootstrap": {
&vb.Bootstrap.Components.URI,
&vb.Bootstrap.Metadata.URI,
},
"capi-kubeadm-control-plane": {
&vb.ControlPlane.Components.URI,
&vb.ControlPlane.Metadata.URI,
},
"cert-manager": {
&vb.CertManager.Manifest.URI,
},
"cluster-api-provider-docker": {
&vb.Docker.Components.URI,
&vb.Docker.ClusterTemplate.URI,
&vb.Docker.Metadata.URI,
},
"cluster-api-provider-vsphere": {
&vb.VSphere.Components.URI,
&vb.VSphere.ClusterTemplate.URI,
&vb.VSphere.Metadata.URI,
},
"cluster-api-provider-cloudstack": {
&vb.CloudStack.Components.URI,
&vb.CloudStack.Metadata.URI,
},
"cluster-api-provider-tinkerbell": {
&vb.Tinkerbell.Components.URI,
&vb.Tinkerbell.ClusterTemplate.URI,
&vb.Tinkerbell.Metadata.URI,
},
"cluster-api-provider-snow": {
&vb.Snow.Components.URI,
&vb.Snow.Metadata.URI,
},
"cluster-api-provider-nutanix": {
&vb.Nutanix.Components.URI,
&vb.Nutanix.ClusterTemplate.URI,
&vb.Nutanix.Metadata.URI,
},
"cilium": {
&vb.Cilium.Manifest.URI,
},
"kindnetd": {
&vb.Kindnetd.Manifest.URI,
},
"eks-anywhere-cluster-controller": {
&vb.Eksa.Components.URI,
},
"etcdadm-bootstrap-provider": {
&vb.ExternalEtcdBootstrap.Components.URI,
&vb.ExternalEtcdBootstrap.Metadata.URI,
},
"etcdadm-controller": {
&vb.ExternalEtcdController.Components.URI,
&vb.ExternalEtcdController.Metadata.URI,
},
"eks-distro": {
&vb.EksD.Components,
&vb.EksD.EksDReleaseUrl,
},
}
}
// Ovas returns the OVA archives in this versions bundle.
func (vb *VersionsBundle) Ovas() []Archive {
return []Archive{
vb.EksD.Ova.Bottlerocket,
}
}
// CloudStackImages returns the images for the CloudStack provider.
func (vb *VersionsBundle) CloudStackImages() []Image {
return []Image{
vb.CloudStack.ClusterAPIController,
vb.CloudStack.KubeRbacProxy,
vb.CloudStack.KubeVip,
}
}
// VsphereImages returns the images for the vSphere provider.
func (vb *VersionsBundle) VsphereImages() []Image {
return []Image{
vb.VSphere.ClusterAPIController,
vb.VSphere.KubeProxy,
vb.VSphere.KubeVip,
vb.VSphere.Manager,
}
}
// DockerImages returns the images for the Docker provider.
func (vb *VersionsBundle) DockerImages() []Image {
return []Image{
vb.Docker.KubeProxy,
vb.Docker.Manager,
}
}
// SnowImages returns the images for the Snow provider, skipping images with an empty URI.
func (vb *VersionsBundle) SnowImages() []Image {
i := make([]Image, 0, 2)
if vb.Snow.KubeVip.URI != "" {
i = append(i, vb.Snow.KubeVip)
}
if vb.Snow.Manager.URI != "" {
i = append(i, vb.Snow.Manager)
}
if vb.Snow.BottlerocketBootstrapSnow.URI != "" {
i = append(i, vb.Snow.BottlerocketBootstrapSnow)
}
return i
}
// TinkerbellImages returns the images for the Tinkerbell provider and stack.
func (vb *VersionsBundle) TinkerbellImages() []Image {
return []Image{
vb.Tinkerbell.ClusterAPIController,
vb.Tinkerbell.KubeVip,
vb.Tinkerbell.Envoy,
vb.Tinkerbell.TinkerbellStack.Actions.Cexec,
vb.Tinkerbell.TinkerbellStack.Actions.Kexec,
vb.Tinkerbell.TinkerbellStack.Actions.ImageToDisk,
vb.Tinkerbell.TinkerbellStack.Actions.OciToDisk,
vb.Tinkerbell.TinkerbellStack.Actions.WriteFile,
vb.Tinkerbell.TinkerbellStack.Actions.Reboot,
vb.Tinkerbell.TinkerbellStack.Boots,
vb.Tinkerbell.TinkerbellStack.Hegel,
vb.Tinkerbell.TinkerbellStack.Hook.Bootkit,
vb.Tinkerbell.TinkerbellStack.Hook.Docker,
vb.Tinkerbell.TinkerbellStack.Hook.Kernel,
vb.Tinkerbell.TinkerbellStack.Rufio,
vb.Tinkerbell.TinkerbellStack.Tink.TinkController,
vb.Tinkerbell.TinkerbellStack.Tink.TinkServer,
vb.Tinkerbell.TinkerbellStack.Tink.TinkWorker,
}
}
// NutanixImages returns the images for the Nutanix provider, skipping images with an empty URI.
func (vb *VersionsBundle) NutanixImages() []Image {
i := make([]Image, 0, 1)
if vb.Nutanix.ClusterAPIController.URI != "" {
i = append(i, vb.Nutanix.ClusterAPIController)
}
return i
}
// SharedImages returns the images shared across all providers.
func (vb *VersionsBundle) SharedImages() []Image {
return []Image{
vb.Bootstrap.Controller,
vb.Bootstrap.KubeProxy,
vb.BottleRocketHostContainers.Admin,
vb.BottleRocketHostContainers.Control,
vb.BottleRocketHostContainers.KubeadmBootstrap,
vb.CertManager.Acmesolver,
vb.CertManager.Cainjector,
vb.CertManager.Controller,
vb.CertManager.Ctl,
vb.CertManager.Webhook,
vb.Cilium.Cilium,
vb.Cilium.Operator,
vb.ClusterAPI.Controller,
vb.ClusterAPI.KubeProxy,
vb.ControlPlane.Controller,
vb.ControlPlane.KubeProxy,
vb.EksD.KindNode,
vb.Eksa.CliTools,
vb.Eksa.ClusterController,
vb.Eksa.DiagnosticCollector,
vb.Flux.HelmController,
vb.Flux.KustomizeController,
vb.Flux.NotificationController,
vb.Flux.SourceController,
vb.ExternalEtcdBootstrap.Controller,
vb.ExternalEtcdBootstrap.KubeProxy,
vb.ExternalEtcdController.Controller,
vb.ExternalEtcdController.KubeProxy,
vb.Haproxy.Image,
vb.PackageController.Controller,
vb.PackageController.TokenRefresher,
}
}
// Images returns all container images referenced by this versions bundle.
func (vb *VersionsBundle) Images() []Image {
groupedImages := [][]Image{
vb.SharedImages(),
vb.DockerImages(),
vb.VsphereImages(),
vb.CloudStackImages(),
vb.SnowImages(),
vb.TinkerbellImages(),
vb.NutanixImages(),
}
size := 0
for _, g := range groupedImages {
size += len(g)
}
images := make([]Image, 0, size)
for _, g := range groupedImages {
images = append(images, g...)
}
return images
}
// Charts returns a map from chart name to the corresponding Helm chart image.
func (vb *VersionsBundle) Charts() map[string]*Image {
return map[string]*Image{
"cilium": &vb.Cilium.HelmChart,
"eks-anywhere-packages": &vb.PackageController.HelmChart,
"tinkerbell-chart": &vb.Tinkerbell.TinkerbellStack.TinkebellChart,
}
}
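// Illustrative usage sketch (not part of the original file; vb is a placeholder
// *VersionsBundle): consumers typically walk Images and Charts, for example to
// mirror artifacts into a private registry.
//
//	for _, image := range vb.Images() {
//		fmt.Println(image.VersionedImage())
//	}
//	for name, chart := range vb.Charts() {
//		fmt.Println(name, chart.VersionedImage())
//	}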
| 230 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func TestVersionsBundleSnowImages(t *testing.T) {
tests := []struct {
name string
versionsBundle *v1alpha1.VersionsBundle
want []v1alpha1.Image
}{
{
name: "no images",
versionsBundle: &v1alpha1.VersionsBundle{},
want: []v1alpha1.Image{},
},
{
name: "kubevip images",
versionsBundle: &v1alpha1.VersionsBundle{
Snow: v1alpha1.SnowBundle{
KubeVip: v1alpha1.Image{
Name: "kubevip",
URI: "uri",
},
},
},
want: []v1alpha1.Image{
{
Name: "kubevip",
URI: "uri",
},
},
},
{
name: "manager images",
versionsBundle: &v1alpha1.VersionsBundle{
Snow: v1alpha1.SnowBundle{
Manager: v1alpha1.Image{
Name: "manage",
URI: "uri",
},
},
},
want: []v1alpha1.Image{
{
Name: "manage",
URI: "uri",
},
},
},
{
name: "bootstrap-snow images",
versionsBundle: &v1alpha1.VersionsBundle{
Snow: v1alpha1.SnowBundle{
BottlerocketBootstrapSnow: v1alpha1.Image{
Name: "bootstrap-snow",
URI: "uri",
},
},
},
want: []v1alpha1.Image{
{
Name: "bootstrap-snow",
URI: "uri",
},
},
},
{
name: "all images",
versionsBundle: &v1alpha1.VersionsBundle{
Snow: v1alpha1.SnowBundle{
KubeVip: v1alpha1.Image{
Name: "kubevip",
URI: "uri",
},
Manager: v1alpha1.Image{
Name: "manage",
URI: "uri",
},
BottlerocketBootstrapSnow: v1alpha1.Image{
Name: "bootstrap-snow",
URI: "uri",
},
},
},
want: []v1alpha1.Image{
{
Name: "kubevip",
URI: "uri",
},
{
Name: "manage",
URI: "uri",
},
{
Name: "bootstrap-snow",
URI: "uri",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.versionsBundle.SnowImages()).To(Equal(tt.want))
})
}
}
| 128 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import "strings"
// Image describes a container image asset, including its URI and related metadata.
type Image struct {
// +kubebuilder:validation:Required
// The asset name
Name string `json:"name,omitempty"`
// +kubebuilder:validation:Required
Description string `json:"description,omitempty"`
// +kubebuilder:validation:Enum=linux;darwin;windows
// Operating system of the asset
OS string `json:"os,omitempty"`
// +optional
// Name of the OS like ubuntu, bottlerocket
OSName string `json:"osName,omitempty"`
// Architectures of the asset
Arch []string `json:"arch,omitempty"`
// The image repository, name, and tag
URI string `json:"uri,omitempty"`
// The SHA256 digest of the image manifest
ImageDigest string `json:"imageDigest,omitempty"`
}
// VersionedImage returns the full image URI, including the tag or digest.
func (i Image) VersionedImage() string {
return i.URI
}
// Image returns the image URI without the tag (everything before the last colon).
func (i Image) Image() string {
lastInd := strings.LastIndex(i.URI, ":")
if lastInd == -1 {
return i.URI
}
return i.URI[:lastInd]
}
// Tag returns the image tag (the text after the last colon), or an empty string if the URI has no tag.
func (i Image) Tag() string {
lastInd := strings.LastIndex(i.URI, ":")
if lastInd == -1 || lastInd == len(i.URI)-1 {
return ""
}
return i.URI[lastInd+1:]
}
// ChartName returns the Helm chart file name derived from the URI, in the form <name>-<tag>.tgz.
func (i Image) ChartName() string {
lastInd := strings.LastIndex(i.Image(), "/")
if lastInd == -1 {
return i.URI
}
chart := i.URI[lastInd+1:]
chart = strings.Replace(chart, ":", "-", 1)
chart += ".tgz"
return chart
}
// Registry returns the registry host, i.e. the part of the URI before the first slash.
func (i *Image) Registry() string {
result := strings.Split(i.URI, "/")
if len(result) < 1 {
return ""
}
return result[0]
}
// Repository returns the repository part of the URI, without registry, tag or digest.
func (i *Image) Repository() string {
rol := strings.TrimPrefix(i.URI, i.Registry()+"/")
result := strings.Split(rol, "@")
if len(result) < 2 {
result = strings.Split(rol, ":")
if len(result) < 1 {
return ""
}
return result[0]
}
return result[0]
}
// Digest returns the digest of a digest-based URI, or an empty string for tag-based URIs.
func (i *Image) Digest() string {
rol := strings.TrimPrefix(i.URI, i.Registry()+"/")
result := strings.Split(rol, "@")
if len(result) < 2 {
return ""
}
return result[1]
}
// Version returns the tag of a tag-based URI, or an empty string for digest-based URIs.
func (i *Image) Version() string {
rol := strings.TrimPrefix(i.URI, i.Registry()+"/")
result := strings.Split(rol, "@")
if len(result) < 2 {
result = strings.Split(rol, ":")
if len(result) < 2 {
return ""
}
return result[1]
}
return ""
}
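// Illustrative breakdown (not part of the original file) of how the accessors
// above split a tag-based URI such as
// "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38":
//
//	Registry()   // "public.ecr.aws"
//	Repository() // "l0g8r8j6/kubernetes-sigs/kind/node"
//	Tag()        // "v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38"
//	Version()    // same as Tag() for tag-based URIs
//	Digest()     // "" (only set for digest-based URIs of the form repo@sha256:<digest>)
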
type Archive struct {
// +kubebuilder:validation:Required
// The asset name
Name string `json:"name,omitempty"`
// +kubebuilder:validation:Required
Description string `json:"description,omitempty"`
// +kubebuilder:validation:Enum=linux;darwin;windows
// Operating system of the asset
OS string `json:"os,omitempty"`
// +optional
// Name of the OS like ubuntu, bottlerocket
OSName string `json:"osName,omitempty"`
// Architectures of the asset
Arch []string `json:"arch,omitempty"`
// +kubebuilder:validation:Required
// The URI where the asset is located
URI string `json:"uri,omitempty"`
// +kubebuilder:validation:Required
// The sha512 of the asset, only applies for 'file' store
SHA512 string `json:"sha512,omitempty"`
// +kubebuilder:validation:Required
// The sha256 of the asset, only applies for 'file' store
SHA256 string `json:"sha256,omitempty"`
}
type Manifest struct {
// +kubebuilder:validation:Required
// URI points to the manifest yaml file
URI string `json:"uri,omitempty"`
}
| 154 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1_test
import (
"testing"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func TestImageVersionedImage(t *testing.T) {
tests := []struct {
testName string
URI string
want string
}{
{
testName: "full uri",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
},
{
testName: "full uri with port",
URI: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
i := v1alpha1.Image{
URI: tt.URI,
}
if got := i.VersionedImage(); got != tt.want {
t.Errorf("Image.VersionedImage() = %v, want %v", got, tt.want)
}
})
}
}
func TestImageImage(t *testing.T) {
tests := []struct {
testName string
URI string
want string
}{
{
testName: "full uri",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node",
},
{
testName: "full uri with port",
URI: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node",
},
{
testName: "no tag",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node",
want: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
i := v1alpha1.Image{
URI: tt.URI,
}
if got := i.Image(); got != tt.want {
t.Errorf("Image.Image() = %v, want %v", got, tt.want)
}
})
}
}
func TestImageTag(t *testing.T) {
tests := []struct {
testName string
URI string
want string
}{
{
testName: "full uri",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
},
{
testName: "full uri with port",
URI: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
},
{
testName: "no tag",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node",
want: "",
},
{
testName: "empty tag",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:",
want: "",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
i := v1alpha1.Image{
URI: tt.URI,
}
if got := i.Tag(); got != tt.want {
t.Errorf("Image.Tag() = %v, want %v", got, tt.want)
}
})
}
}
func TestImage_Registry(t *testing.T) {
tests := []struct {
testName string
URI string
want string
}{
{
testName: "full uri",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "public.ecr.aws",
},
{
testName: "full uri with port",
URI: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "public.ecr.aws:8484",
},
{
testName: "no slash",
URI: "public.ecr.aws",
want: "public.ecr.aws",
},
{
testName: "nothing",
URI: "",
want: "",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
i := v1alpha1.Image{
URI: tt.URI,
}
if got := i.Registry(); got != tt.want {
t.Errorf("Image.Registry() = %v, want %v", got, tt.want)
}
})
}
}
func TestImage_Repository(t *testing.T) {
tests := []struct {
testName string
URI string
want string
}{
{
testName: "tag",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "l0g8r8j6/kubernetes-sigs/kind/node",
},
{
testName: "port and tag",
URI: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "l0g8r8j6/kubernetes-sigs/kind/node",
},
{
testName: "port and sha256",
URI: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node@sha256:6165d26ef648100226c1944c6b1c83e875a4bf81bba91054a00c5121cfeff363",
want: "l0g8r8j6/kubernetes-sigs/kind/node",
},
{
testName: "port no tag",
URI: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node",
want: "l0g8r8j6/kubernetes-sigs/kind/node",
},
{
testName: "no tag",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node",
want: "l0g8r8j6/kubernetes-sigs/kind/node",
},
{
testName: "no nothing",
URI: "",
want: "",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
i := v1alpha1.Image{
URI: tt.URI,
}
if got := i.Repository(); got != tt.want {
t.Errorf("Image.Repository() = %v, want %v", got, tt.want)
}
})
}
}
func TestImage_Version(t *testing.T) {
tests := []struct {
testName string
URI string
want string
}{
{
testName: "tag",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
},
{
testName: "port and tag",
URI: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
},
{
testName: "port and sha256",
URI: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node@sha256:6165d26ef648100226c1944c6b1c83e875a4bf81bba91054a00c5121cfeff363",
want: "",
},
{
testName: "port no tag",
URI: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node",
want: "",
},
{
testName: "no tag",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node",
want: "",
},
{
testName: "no nothing",
URI: "",
want: "",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
i := v1alpha1.Image{
URI: tt.URI,
}
if got := i.Version(); got != tt.want {
t.Errorf("Image.Version() = %v, want %v", got, tt.want)
}
})
}
}
func TestImage_Digest(t *testing.T) {
tests := []struct {
testName string
URI string
want string
}{
{
testName: "tag",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node:v1.20.4-eks-d-1-20-1-eks-a-0.0.1.build.38",
want: "",
},
{
testName: "port and sha256",
URI: "public.ecr.aws:8484/l0g8r8j6/kubernetes-sigs/kind/node@sha256:6165d26ef648100226c1944c6b1c83e875a4bf81bba91054a00c5121cfeff363",
want: "sha256:6165d26ef648100226c1944c6b1c83e875a4bf81bba91054a00c5121cfeff363",
},
{
testName: "no tag",
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/node",
want: "",
},
{
testName: "no nothing",
URI: "",
want: "",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
i := v1alpha1.Image{
URI: tt.URI,
}
if got := i.Digest(); got != tt.want {
t.Errorf("Image.Digest() = %v, want %v", got, tt.want)
}
})
}
}
| 300 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// BundlesSpec defines the desired state of Bundles.
type BundlesSpec struct {
// Monotonically increasing release number
Number int `json:"number"`
CliMinVersion string `json:"cliMinVersion"`
CliMaxVersion string `json:"cliMaxVersion"`
VersionsBundles []VersionsBundle `json:"versionsBundles"`
}
// BundlesStatus defines the observed state of Bundles.
type BundlesStatus struct{}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// Bundles is the Schema for the bundles API.
type Bundles struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec BundlesSpec `json:"spec,omitempty"`
Status BundlesStatus `json:"status,omitempty"`
}
// DefaultEksAToolsImage returns the EKS-A CLI tools image of the first versions bundle.
func (b *Bundles) DefaultEksAToolsImage() Image {
return b.Spec.VersionsBundles[0].Eksa.CliTools
}
//+kubebuilder:object:root=true
// BundlesList contains a list of Bundles.
type BundlesList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Bundles `json:"items"`
}
func init() {
SchemeBuilder.Register(&Bundles{}, &BundlesList{})
}
type VersionsBundle struct {
KubeVersion string `json:"kubeVersion"`
EksD EksDRelease `json:"eksD"`
CertManager CertManagerBundle `json:"certManager"`
ClusterAPI CoreClusterAPI `json:"clusterAPI"`
Bootstrap KubeadmBootstrapBundle `json:"bootstrap"`
ControlPlane KubeadmControlPlaneBundle `json:"controlPlane"`
VSphere VSphereBundle `json:"vSphere"`
CloudStack CloudStackBundle `json:"cloudStack,omitempty"`
Docker DockerBundle `json:"docker"`
Eksa EksaBundle `json:"eksa"`
Cilium CiliumBundle `json:"cilium"`
Kindnetd KindnetdBundle `json:"kindnetd"`
Flux FluxBundle `json:"flux"`
PackageController PackageBundle `json:"packageController"`
BottleRocketHostContainers BottlerocketHostContainersBundle `json:"bottlerocketHostContainers"`
ExternalEtcdBootstrap EtcdadmBootstrapBundle `json:"etcdadmBootstrap"`
ExternalEtcdController EtcdadmControllerBundle `json:"etcdadmController"`
Tinkerbell TinkerbellBundle `json:"tinkerbell,omitempty"`
Haproxy HaproxyBundle `json:"haproxy,omitempty"`
Snow SnowBundle `json:"snow,omitempty"`
Nutanix NutanixBundle `json:"nutanix,omitempty"`
// This field has been deprecated
Aws *AwsBundle `json:"aws,omitempty"`
}
type EksDRelease struct {
// +kubebuilder:validation:Required
Name string `json:"name,omitempty"`
// +kubebuilder:validation:Required
// Release branch of the EKS-D release like 1-19, 1-20
ReleaseChannel string `json:"channel,omitempty"`
// +kubebuilder:validation:Required
// Kubernetes version of the EKS-D release
KubeVersion string `json:"kubeVersion,omitempty"`
// +kubebuilder:validation:Required
// URL pointing to the EKS-D release manifest from which
// the assets were created
EksDReleaseUrl string `json:"manifestUrl,omitempty"`
// +kubebuilder:validation:Required
// Git commit the component is built from, before any patches
GitCommit string `json:"gitCommit,omitempty"`
// KindNode points to a kind image built with this eks-d version
KindNode Image `json:"kindNode,omitempty"`
// Ami points to a collection of AMIs built with this eks-d version
Ami OSImageBundle `json:"ami,omitempty"`
// Ova points to a collection of OVAs built with this eks-d version
Ova OSImageBundle `json:"ova,omitempty"`
// Raw points to a collection of Raw images built with this eks-d version
Raw OSImageBundle `json:"raw,omitempty"`
// Components refers to the URL that points to the EKS-D release CRD
Components string `json:"components,omitempty"`
// Etcdadm points to the etcdadm binary/tarball built for this eks-d kube version
Etcdadm Archive `json:"etcdadm,omitempty"`
// Crictl points to the crictl binary/tarball built for this eks-d kube version
Crictl Archive `json:"crictl,omitempty"`
// ImageBuilder points to the image-builder binary used to build EKS-D-based node images
ImageBuilder Archive `json:"imagebuilder,omitempty"`
// Containerd points to the containerd binary baked into this EKS-D-based node image
Containerd Archive `json:"containerd,omitempty"`
}
type OSImageBundle struct {
Bottlerocket Archive `json:"bottlerocket,omitempty"`
}
type BottlerocketHostContainersBundle struct {
Admin Image `json:"admin"`
Control Image `json:"control"`
KubeadmBootstrap Image `json:"kubeadmBootstrap"`
}
type CertManagerBundle struct {
Version string `json:"version,omitempty"`
Acmesolver Image `json:"acmesolver"`
Cainjector Image `json:"cainjector"`
Controller Image `json:"controller"`
Ctl Image `json:"ctl"`
Webhook Image `json:"webhook"`
Manifest Manifest `json:"manifest"`
}
type CoreClusterAPI struct {
Version string `json:"version"`
Controller Image `json:"controller"`
KubeProxy Image `json:"kubeProxy"`
Components Manifest `json:"components"`
Metadata Manifest `json:"metadata"`
}
type KubeadmBootstrapBundle struct {
Version string `json:"version"`
Controller Image `json:"controller"`
KubeProxy Image `json:"kubeProxy"`
Components Manifest `json:"components"`
Metadata Manifest `json:"metadata"`
}
type KubeadmControlPlaneBundle struct {
Version string `json:"version"`
Controller Image `json:"controller"`
KubeProxy Image `json:"kubeProxy"`
Components Manifest `json:"components"`
Metadata Manifest `json:"metadata"`
}
type AwsBundle struct {
Version string `json:"version"`
Controller Image `json:"controller"`
KubeProxy Image `json:"kubeProxy"`
Components Manifest `json:"components"`
ClusterTemplate Manifest `json:"clusterTemplate"`
Metadata Manifest `json:"metadata"`
}
type VSphereBundle struct {
Version string `json:"version"`
ClusterAPIController Image `json:"clusterAPIController"`
KubeProxy Image `json:"kubeProxy"`
Manager Image `json:"manager"`
KubeVip Image `json:"kubeVip"`
Components Manifest `json:"components"`
Metadata Manifest `json:"metadata"`
ClusterTemplate Manifest `json:"clusterTemplate"`
// This field has been deprecated
Driver *Image `json:"driver,omitempty"`
// This field has been deprecated
Syncer *Image `json:"syncer,omitempty"`
}
type DockerBundle struct {
Version string `json:"version"`
Manager Image `json:"manager"`
KubeProxy Image `json:"kubeProxy"`
Components Manifest `json:"components"`
ClusterTemplate Manifest `json:"clusterTemplate"`
Metadata Manifest `json:"metadata"`
}
type CloudStackBundle struct {
Version string `json:"version"`
ClusterAPIController Image `json:"clusterAPIController"`
KubeRbacProxy Image `json:"kubeRbacProxy"`
KubeVip Image `json:"kubeVip"`
Components Manifest `json:"components"`
Metadata Manifest `json:"metadata"`
}
type CiliumBundle struct {
Version string `json:"version,omitempty"`
Cilium Image `json:"cilium"`
Operator Image `json:"operator"`
Manifest Manifest `json:"manifest"`
HelmChart Image `json:"helmChart,omitempty"`
}
type KindnetdBundle struct {
Version string `json:"version,omitempty"`
Manifest Manifest `json:"manifest"`
}
type FluxBundle struct {
Version string `json:"version,omitempty"`
SourceController Image `json:"sourceController"`
KustomizeController Image `json:"kustomizeController"`
HelmController Image `json:"helmController"`
NotificationController Image `json:"notificationController"`
}
type PackageBundle struct {
Version string `json:"version,omitempty"`
Controller Image `json:"packageController"`
TokenRefresher Image `json:"tokenRefresher"`
HelmChart Image `json:"helmChart,omitempty"`
}
type EksaBundle struct {
Version string `json:"version,omitempty"`
CliTools Image `json:"cliTools"`
ClusterController Image `json:"clusterController"`
DiagnosticCollector Image `json:"diagnosticCollector"`
Components Manifest `json:"components"`
}
type EtcdadmBootstrapBundle struct {
Version string `json:"version"`
Controller Image `json:"controller"`
KubeProxy Image `json:"kubeProxy"`
Components Manifest `json:"components"`
Metadata Manifest `json:"metadata"`
}
type EtcdadmControllerBundle struct {
Version string `json:"version"`
Controller Image `json:"controller"`
KubeProxy Image `json:"kubeProxy"`
Components Manifest `json:"components"`
Metadata Manifest `json:"metadata"`
}
type TinkerbellStackBundle struct {
Actions ActionsBundle `json:"actions"`
Boots Image `json:"boots"`
Hegel Image `json:"hegel"`
TinkebellChart Image `json:"tinkerbellChart"`
Hook HookBundle `json:"hook"`
Rufio Image `json:"rufio"`
Tink TinkBundle `json:"tink"`
}
// ActionsBundle bundles the Tinkerbell template action images.
type ActionsBundle struct {
Cexec Image `json:"cexec"`
Kexec Image `json:"kexec"`
ImageToDisk Image `json:"imageToDisk"`
OciToDisk Image `json:"ociToDisk"`
WriteFile Image `json:"writeFile"`
Reboot Image `json:"reboot"`
}
type TinkBundle struct {
TinkController Image `json:"tinkController"`
TinkServer Image `json:"tinkServer"`
TinkWorker Image `json:"tinkWorker"`
}
// HookBundle bundles the Tinkerbell hook OS artifacts.
type HookBundle struct {
Bootkit Image `json:"bootkit"`
Docker Image `json:"docker"`
Kernel Image `json:"kernel"`
Initramfs HookArch `json:"initramfs"`
Vmlinuz HookArch `json:"vmlinuz"`
}
type HookArch struct {
Arm Archive `json:"arm"`
Amd Archive `json:"amd"`
}
type TinkerbellBundle struct {
Version string `json:"version"`
ClusterAPIController Image `json:"clusterAPIController"`
KubeVip Image `json:"kubeVip"`
Envoy Image `json:"envoy"`
Components Manifest `json:"components"`
Metadata Manifest `json:"metadata"`
ClusterTemplate Manifest `json:"clusterTemplate"`
TinkerbellStack TinkerbellStackBundle `json:"tinkerbellStack,omitempty"`
}
type HaproxyBundle struct {
Image Image `json:"image"`
}
type SnowBundle struct {
Version string `json:"version"`
Manager Image `json:"manager"`
KubeVip Image `json:"kubeVip"`
Components Manifest `json:"components"`
Metadata Manifest `json:"metadata"`
BottlerocketBootstrapSnow Image `json:"bottlerocketBootstrapSnow"`
}
type NutanixBundle struct {
ClusterAPIController Image `json:"clusterAPIController"`
Version string `json:"version"`
KubeVip Image `json:"kubeVip"`
Components Manifest `json:"components"`
Metadata Manifest `json:"metadata"`
ClusterTemplate Manifest `json:"clusterTemplate"`
}
| 347 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func TestBundlesDefaultEksAToolsImage(t *testing.T) {
g := NewWithT(t)
bundles := &v1alpha1.Bundles{
Spec: v1alpha1.BundlesSpec{
VersionsBundles: []v1alpha1.VersionsBundle{
{
Eksa: v1alpha1.EksaBundle{
CliTools: v1alpha1.Image{
URI: "tools:v1.0.0",
},
},
},
},
},
}
g.Expect(bundles.DefaultEksAToolsImage()).To(Equal(v1alpha1.Image{URI: "tools:v1.0.0"}))
}
| 42 |
eks-anywhere | aws | Go | package v1alpha1
import (
"fmt"
"strings"
)
// EKSAReleaseKind is the Kind of EKSARelease.
const EKSAReleaseKind = "EKSARelease"
// Generates the naming convention of EKSARelease from a version.
func GenerateEKSAReleaseName(version string) string {
return fmt.Sprintf("eksa-%s", strings.ReplaceAll(version, ".", "-"))
}
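// exampleEKSAReleaseName is an illustrative sketch added for clarity and is not
// part of the original source. It shows the naming convention produced above:
// dots in the semver are replaced with dashes and the result is prefixed with
// "eksa-", so the placeholder version "v0.16.5" maps to "eksa-v0-16-5".
func exampleEKSAReleaseName() string {
	return GenerateEKSAReleaseName("v0.16.5") // "eksa-v0-16-5"
}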
| 15 |
eks-anywhere | aws | Go | package v1alpha1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// EKSARelease is the mapping between release semver of EKS-A and a Bundles resource on the cluster.
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
type EKSARelease struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec EKSAReleaseSpec `json:"spec,omitempty"`
}
// EKSAReleaseSpec defines the desired state of EKSARelease.
type EKSAReleaseSpec struct {
// +kubebuilder:validation:Required
// +kubebuilder:validation:Type=string
// Date of EKS-A Release
ReleaseDate string `json:"releaseDate"`
// +kubebuilder:validation:Required
// EKS-A release semantic version
Version string `json:"version"`
// +kubebuilder:validation:Required
// Git commit the component is built from, before any patches
GitCommit string `json:"gitCommit"`
// +kubebuilder:validation:Required
// Manifest url to parse bundle information from for this EKS-A release
BundleManifestURL string `json:"bundleManifestUrl"`
// Reference to a Bundles resource in the cluster
BundlesRef BundlesRef `json:"bundlesRef"`
}
// EKSAReleaseStatus defines the observed state of EKSARelease.
type EKSAReleaseStatus struct{}
// BundlesRef refers to a Bundles resource in the cluster.
type BundlesRef struct {
// APIVersion refers to the Bundles APIVersion
APIVersion string `json:"apiVersion"`
// Name refers to the name of the Bundles object in the cluster
Name string `json:"name"`
// Namespace refers to the Bundles's namespace
Namespace string `json:"namespace"`
}
// EKSAReleaseList is a list of EKSARelease resources.
// +kubebuilder:object:root=true
type EKSAReleaseList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []EKSARelease `json:"items"`
}
func init() {
SchemeBuilder.Register(&EKSARelease{}, &EKSAReleaseList{})
}
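// newExampleEKSARelease is an illustrative sketch added for clarity and is not
// part of the original source. It shows how an EKSARelease ties an EKS-A
// release semver to a Bundles object on the cluster via BundlesRef. The
// version, date, commit, manifest URL, and Bundles name/namespace below are
// placeholders.
func newExampleEKSARelease() EKSARelease {
	return EKSARelease{
		TypeMeta: metav1.TypeMeta{
			Kind:       EKSAReleaseKind,
			APIVersion: GroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			// GenerateEKSAReleaseName (same package) yields "eksa-v0-16-5".
			Name: GenerateEKSAReleaseName("v0.16.5"),
		},
		Spec: EKSAReleaseSpec{
			ReleaseDate:       "2024-01-01",
			Version:           "v0.16.5",
			GitCommit:         "0123456789abcdef0123456789abcdef01234567",
			BundleManifestURL: "https://example.com/releases/bundles/42/manifest.yaml",
			BundlesRef: BundlesRef{
				APIVersion: GroupVersion.String(),
				Name:       "bundles-42",
				Namespace:  "eksa-system",
			},
		},
	}
}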
| 63 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// package v1alpha1 contains API Schema definitions for the release v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=anywhere.eks.amazonaws.com
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "anywhere.eks.amazonaws.com", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
| 35 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ReleaseSpec defines the desired state of Release.
type ReleaseSpec struct {
// +kubebuilder:validation:Required
// EKS-A Latest Release version following semver
LatestVersion string `json:"latestVersion"`
// +kubebuilder:validation:Required
// List of all eks-a releases
Releases []EksARelease `json:"releases"`
}
// ReleaseStatus defines the observed state of Release.
type ReleaseStatus struct{}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// Release is the Schema for the releases API.
type Release struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ReleaseSpec `json:"spec"`
Status ReleaseStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// ReleaseList contains a list of Release.
type ReleaseList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Release `json:"items"`
}
func init() {
SchemeBuilder.Register(&Release{}, &ReleaseList{})
}
// EksARelease defines each release of EKS-Anywhere.
type EksARelease struct {
// +kubebuilder:validation:Required
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Format=date-time
Date string `json:"date"`
// +kubebuilder:validation:Required
// EKS-A release version
Version string `json:"version"`
// +kubebuilder:validation:Required
// +kubebuilder:validation:Minimum=1
// Monotonically increasing release number
Number int `json:"number"`
// +kubebuilder:validation:Required
// Git commit the component is built from, before any patches
GitCommit string `json:"gitCommit"`
// Git tag the component is built from, before any patches
GitTag string `json:"gitTag"`
// +kubebuilder:validation:Required
// Manifest url to parse bundle information from for this EKS-A release
BundleManifestUrl string `json:"bundleManifestUrl"`
// +kubebuilder:validation:Required
// EKS Anywhere binary bundle
EksABinary BinaryBundle `json:"eksABinary"`
// +kubebuilder:validation:Required
// EKS Anywhere CLI bundle
EksACLI PlatformBundle `json:"eksACLI"`
}
type BinaryBundle struct {
// +kubebuilder:validation:Required
// EKS Anywhere Linux binary
LinuxBinary Archive `json:"linux"`
// +kubebuilder:validation:Required
// EKS Anywhere Darwin binary
DarwinBinary Archive `json:"darwin"`
}
type PlatformBundle struct {
// +kubebuilder:validation:Required
// EKS Anywhere Linux binary
LinuxBinary ArchitectureBundle `json:"linux"`
// +kubebuilder:validation:Required
// EKS Anywhere Darwin binary
DarwinBinary ArchitectureBundle `json:"darwin"`
}
type ArchitectureBundle struct {
Amd64 Archive `json:"amd64"`
Arm64 Archive `json:"arm64"`
}
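// exampleEksAReleaseEntry is an illustrative sketch added for clarity and is
// not part of the original source. It shows a minimal EksARelease entry as it
// would appear under ReleaseSpec.Releases; the date, version, number, commit,
// and manifest URL are placeholders, and the Archive contents of the binary
// bundles are left empty.
func exampleEksAReleaseEntry() EksARelease {
	return EksARelease{
		Date:              "2024-01-01T00:00:00Z",
		Version:           "v0.16.5",
		Number:            42,
		GitCommit:         "0123456789abcdef0123456789abcdef01234567",
		BundleManifestUrl: "https://example.com/releases/bundles/42/manifest.yaml",
		EksABinary:        BinaryBundle{},   // per-OS Archive details omitted
		EksACLI:           PlatformBundle{}, // per-OS, per-arch Archive details omitted
	}
}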
| 120 |
eks-anywhere | aws | Go | //go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ActionsBundle) DeepCopyInto(out *ActionsBundle) {
*out = *in
in.Cexec.DeepCopyInto(&out.Cexec)
in.Kexec.DeepCopyInto(&out.Kexec)
in.ImageToDisk.DeepCopyInto(&out.ImageToDisk)
in.OciToDisk.DeepCopyInto(&out.OciToDisk)
in.WriteFile.DeepCopyInto(&out.WriteFile)
in.Reboot.DeepCopyInto(&out.Reboot)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionsBundle.
func (in *ActionsBundle) DeepCopy() *ActionsBundle {
if in == nil {
return nil
}
out := new(ActionsBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ArchitectureBundle) DeepCopyInto(out *ArchitectureBundle) {
*out = *in
in.Amd64.DeepCopyInto(&out.Amd64)
in.Arm64.DeepCopyInto(&out.Arm64)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArchitectureBundle.
func (in *ArchitectureBundle) DeepCopy() *ArchitectureBundle {
if in == nil {
return nil
}
out := new(ArchitectureBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Archive) DeepCopyInto(out *Archive) {
*out = *in
if in.Arch != nil {
in, out := &in.Arch, &out.Arch
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Archive.
func (in *Archive) DeepCopy() *Archive {
if in == nil {
return nil
}
out := new(Archive)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AwsBundle) DeepCopyInto(out *AwsBundle) {
*out = *in
in.Controller.DeepCopyInto(&out.Controller)
in.KubeProxy.DeepCopyInto(&out.KubeProxy)
out.Components = in.Components
out.ClusterTemplate = in.ClusterTemplate
out.Metadata = in.Metadata
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsBundle.
func (in *AwsBundle) DeepCopy() *AwsBundle {
if in == nil {
return nil
}
out := new(AwsBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BinaryBundle) DeepCopyInto(out *BinaryBundle) {
*out = *in
in.LinuxBinary.DeepCopyInto(&out.LinuxBinary)
in.DarwinBinary.DeepCopyInto(&out.DarwinBinary)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBundle.
func (in *BinaryBundle) DeepCopy() *BinaryBundle {
if in == nil {
return nil
}
out := new(BinaryBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BottlerocketHostContainersBundle) DeepCopyInto(out *BottlerocketHostContainersBundle) {
*out = *in
in.Admin.DeepCopyInto(&out.Admin)
in.Control.DeepCopyInto(&out.Control)
in.KubeadmBootstrap.DeepCopyInto(&out.KubeadmBootstrap)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BottlerocketHostContainersBundle.
func (in *BottlerocketHostContainersBundle) DeepCopy() *BottlerocketHostContainersBundle {
if in == nil {
return nil
}
out := new(BottlerocketHostContainersBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Bundles) DeepCopyInto(out *Bundles) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bundles.
func (in *Bundles) DeepCopy() *Bundles {
if in == nil {
return nil
}
out := new(Bundles)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Bundles) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BundlesList) DeepCopyInto(out *BundlesList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Bundles, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundlesList.
func (in *BundlesList) DeepCopy() *BundlesList {
if in == nil {
return nil
}
out := new(BundlesList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BundlesList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BundlesRef) DeepCopyInto(out *BundlesRef) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundlesRef.
func (in *BundlesRef) DeepCopy() *BundlesRef {
if in == nil {
return nil
}
out := new(BundlesRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BundlesSpec) DeepCopyInto(out *BundlesSpec) {
*out = *in
if in.VersionsBundles != nil {
in, out := &in.VersionsBundles, &out.VersionsBundles
*out = make([]VersionsBundle, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundlesSpec.
func (in *BundlesSpec) DeepCopy() *BundlesSpec {
if in == nil {
return nil
}
out := new(BundlesSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BundlesStatus) DeepCopyInto(out *BundlesStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundlesStatus.
func (in *BundlesStatus) DeepCopy() *BundlesStatus {
if in == nil {
return nil
}
out := new(BundlesStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CertManagerBundle) DeepCopyInto(out *CertManagerBundle) {
*out = *in
in.Acmesolver.DeepCopyInto(&out.Acmesolver)
in.Cainjector.DeepCopyInto(&out.Cainjector)
in.Controller.DeepCopyInto(&out.Controller)
in.Ctl.DeepCopyInto(&out.Ctl)
in.Webhook.DeepCopyInto(&out.Webhook)
out.Manifest = in.Manifest
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertManagerBundle.
func (in *CertManagerBundle) DeepCopy() *CertManagerBundle {
if in == nil {
return nil
}
out := new(CertManagerBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CiliumBundle) DeepCopyInto(out *CiliumBundle) {
*out = *in
in.Cilium.DeepCopyInto(&out.Cilium)
in.Operator.DeepCopyInto(&out.Operator)
out.Manifest = in.Manifest
in.HelmChart.DeepCopyInto(&out.HelmChart)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBundle.
func (in *CiliumBundle) DeepCopy() *CiliumBundle {
if in == nil {
return nil
}
out := new(CiliumBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudStackBundle) DeepCopyInto(out *CloudStackBundle) {
*out = *in
in.ClusterAPIController.DeepCopyInto(&out.ClusterAPIController)
in.KubeRbacProxy.DeepCopyInto(&out.KubeRbacProxy)
in.KubeVip.DeepCopyInto(&out.KubeVip)
out.Components = in.Components
out.Metadata = in.Metadata
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudStackBundle.
func (in *CloudStackBundle) DeepCopy() *CloudStackBundle {
if in == nil {
return nil
}
out := new(CloudStackBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CoreClusterAPI) DeepCopyInto(out *CoreClusterAPI) {
*out = *in
in.Controller.DeepCopyInto(&out.Controller)
in.KubeProxy.DeepCopyInto(&out.KubeProxy)
out.Components = in.Components
out.Metadata = in.Metadata
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreClusterAPI.
func (in *CoreClusterAPI) DeepCopy() *CoreClusterAPI {
if in == nil {
return nil
}
out := new(CoreClusterAPI)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DockerBundle) DeepCopyInto(out *DockerBundle) {
*out = *in
in.Manager.DeepCopyInto(&out.Manager)
in.KubeProxy.DeepCopyInto(&out.KubeProxy)
out.Components = in.Components
out.ClusterTemplate = in.ClusterTemplate
out.Metadata = in.Metadata
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerBundle.
func (in *DockerBundle) DeepCopy() *DockerBundle {
if in == nil {
return nil
}
out := new(DockerBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EKSARelease) DeepCopyInto(out *EKSARelease) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSARelease.
func (in *EKSARelease) DeepCopy() *EKSARelease {
if in == nil {
return nil
}
out := new(EKSARelease)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EKSARelease) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EKSAReleaseList) DeepCopyInto(out *EKSAReleaseList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]EKSARelease, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSAReleaseList.
func (in *EKSAReleaseList) DeepCopy() *EKSAReleaseList {
if in == nil {
return nil
}
out := new(EKSAReleaseList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EKSAReleaseList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EKSAReleaseSpec) DeepCopyInto(out *EKSAReleaseSpec) {
*out = *in
out.BundlesRef = in.BundlesRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSAReleaseSpec.
func (in *EKSAReleaseSpec) DeepCopy() *EKSAReleaseSpec {
if in == nil {
return nil
}
out := new(EKSAReleaseSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EKSAReleaseStatus) DeepCopyInto(out *EKSAReleaseStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EKSAReleaseStatus.
func (in *EKSAReleaseStatus) DeepCopy() *EKSAReleaseStatus {
if in == nil {
return nil
}
out := new(EKSAReleaseStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EksARelease) DeepCopyInto(out *EksARelease) {
*out = *in
in.EksABinary.DeepCopyInto(&out.EksABinary)
in.EksACLI.DeepCopyInto(&out.EksACLI)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EksARelease.
func (in *EksARelease) DeepCopy() *EksARelease {
if in == nil {
return nil
}
out := new(EksARelease)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EksDRelease) DeepCopyInto(out *EksDRelease) {
*out = *in
in.KindNode.DeepCopyInto(&out.KindNode)
in.Ami.DeepCopyInto(&out.Ami)
in.Ova.DeepCopyInto(&out.Ova)
in.Raw.DeepCopyInto(&out.Raw)
in.Etcdadm.DeepCopyInto(&out.Etcdadm)
in.Crictl.DeepCopyInto(&out.Crictl)
in.ImageBuilder.DeepCopyInto(&out.ImageBuilder)
in.Containerd.DeepCopyInto(&out.Containerd)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EksDRelease.
func (in *EksDRelease) DeepCopy() *EksDRelease {
if in == nil {
return nil
}
out := new(EksDRelease)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EksaBundle) DeepCopyInto(out *EksaBundle) {
*out = *in
in.CliTools.DeepCopyInto(&out.CliTools)
in.ClusterController.DeepCopyInto(&out.ClusterController)
in.DiagnosticCollector.DeepCopyInto(&out.DiagnosticCollector)
out.Components = in.Components
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EksaBundle.
func (in *EksaBundle) DeepCopy() *EksaBundle {
if in == nil {
return nil
}
out := new(EksaBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EtcdadmBootstrapBundle) DeepCopyInto(out *EtcdadmBootstrapBundle) {
*out = *in
in.Controller.DeepCopyInto(&out.Controller)
in.KubeProxy.DeepCopyInto(&out.KubeProxy)
out.Components = in.Components
out.Metadata = in.Metadata
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmBootstrapBundle.
func (in *EtcdadmBootstrapBundle) DeepCopy() *EtcdadmBootstrapBundle {
if in == nil {
return nil
}
out := new(EtcdadmBootstrapBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EtcdadmControllerBundle) DeepCopyInto(out *EtcdadmControllerBundle) {
*out = *in
in.Controller.DeepCopyInto(&out.Controller)
in.KubeProxy.DeepCopyInto(&out.KubeProxy)
out.Components = in.Components
out.Metadata = in.Metadata
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdadmControllerBundle.
func (in *EtcdadmControllerBundle) DeepCopy() *EtcdadmControllerBundle {
if in == nil {
return nil
}
out := new(EtcdadmControllerBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FluxBundle) DeepCopyInto(out *FluxBundle) {
*out = *in
in.SourceController.DeepCopyInto(&out.SourceController)
in.KustomizeController.DeepCopyInto(&out.KustomizeController)
in.HelmController.DeepCopyInto(&out.HelmController)
in.NotificationController.DeepCopyInto(&out.NotificationController)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluxBundle.
func (in *FluxBundle) DeepCopy() *FluxBundle {
if in == nil {
return nil
}
out := new(FluxBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HaproxyBundle) DeepCopyInto(out *HaproxyBundle) {
*out = *in
in.Image.DeepCopyInto(&out.Image)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HaproxyBundle.
func (in *HaproxyBundle) DeepCopy() *HaproxyBundle {
if in == nil {
return nil
}
out := new(HaproxyBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HookArch) DeepCopyInto(out *HookArch) {
*out = *in
in.Arm.DeepCopyInto(&out.Arm)
in.Amd.DeepCopyInto(&out.Amd)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HookArch.
func (in *HookArch) DeepCopy() *HookArch {
if in == nil {
return nil
}
out := new(HookArch)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HookBundle) DeepCopyInto(out *HookBundle) {
*out = *in
in.Bootkit.DeepCopyInto(&out.Bootkit)
in.Docker.DeepCopyInto(&out.Docker)
in.Kernel.DeepCopyInto(&out.Kernel)
in.Initramfs.DeepCopyInto(&out.Initramfs)
in.Vmlinuz.DeepCopyInto(&out.Vmlinuz)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HookBundle.
func (in *HookBundle) DeepCopy() *HookBundle {
if in == nil {
return nil
}
out := new(HookBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Image) DeepCopyInto(out *Image) {
*out = *in
if in.Arch != nil {
in, out := &in.Arch, &out.Arch
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
func (in *Image) DeepCopy() *Image {
if in == nil {
return nil
}
out := new(Image)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KindnetdBundle) DeepCopyInto(out *KindnetdBundle) {
*out = *in
out.Manifest = in.Manifest
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KindnetdBundle.
func (in *KindnetdBundle) DeepCopy() *KindnetdBundle {
if in == nil {
return nil
}
out := new(KindnetdBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeadmBootstrapBundle) DeepCopyInto(out *KubeadmBootstrapBundle) {
*out = *in
in.Controller.DeepCopyInto(&out.Controller)
in.KubeProxy.DeepCopyInto(&out.KubeProxy)
out.Components = in.Components
out.Metadata = in.Metadata
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeadmBootstrapBundle.
func (in *KubeadmBootstrapBundle) DeepCopy() *KubeadmBootstrapBundle {
if in == nil {
return nil
}
out := new(KubeadmBootstrapBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeadmControlPlaneBundle) DeepCopyInto(out *KubeadmControlPlaneBundle) {
*out = *in
in.Controller.DeepCopyInto(&out.Controller)
in.KubeProxy.DeepCopyInto(&out.KubeProxy)
out.Components = in.Components
out.Metadata = in.Metadata
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeadmControlPlaneBundle.
func (in *KubeadmControlPlaneBundle) DeepCopy() *KubeadmControlPlaneBundle {
if in == nil {
return nil
}
out := new(KubeadmControlPlaneBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Manifest) DeepCopyInto(out *Manifest) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest.
func (in *Manifest) DeepCopy() *Manifest {
if in == nil {
return nil
}
out := new(Manifest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NutanixBundle) DeepCopyInto(out *NutanixBundle) {
*out = *in
in.ClusterAPIController.DeepCopyInto(&out.ClusterAPIController)
in.KubeVip.DeepCopyInto(&out.KubeVip)
out.Components = in.Components
out.Metadata = in.Metadata
out.ClusterTemplate = in.ClusterTemplate
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixBundle.
func (in *NutanixBundle) DeepCopy() *NutanixBundle {
if in == nil {
return nil
}
out := new(NutanixBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSImageBundle) DeepCopyInto(out *OSImageBundle) {
*out = *in
in.Bottlerocket.DeepCopyInto(&out.Bottlerocket)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageBundle.
func (in *OSImageBundle) DeepCopy() *OSImageBundle {
if in == nil {
return nil
}
out := new(OSImageBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PackageBundle) DeepCopyInto(out *PackageBundle) {
*out = *in
in.Controller.DeepCopyInto(&out.Controller)
in.TokenRefresher.DeepCopyInto(&out.TokenRefresher)
in.HelmChart.DeepCopyInto(&out.HelmChart)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageBundle.
func (in *PackageBundle) DeepCopy() *PackageBundle {
if in == nil {
return nil
}
out := new(PackageBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PlatformBundle) DeepCopyInto(out *PlatformBundle) {
*out = *in
in.LinuxBinary.DeepCopyInto(&out.LinuxBinary)
in.DarwinBinary.DeepCopyInto(&out.DarwinBinary)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformBundle.
func (in *PlatformBundle) DeepCopy() *PlatformBundle {
if in == nil {
return nil
}
out := new(PlatformBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Release) DeepCopyInto(out *Release) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release.
func (in *Release) DeepCopy() *Release {
if in == nil {
return nil
}
out := new(Release)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Release) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReleaseList) DeepCopyInto(out *ReleaseList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Release, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReleaseList.
func (in *ReleaseList) DeepCopy() *ReleaseList {
if in == nil {
return nil
}
out := new(ReleaseList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReleaseList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReleaseSpec) DeepCopyInto(out *ReleaseSpec) {
*out = *in
if in.Releases != nil {
in, out := &in.Releases, &out.Releases
*out = make([]EksARelease, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReleaseSpec.
func (in *ReleaseSpec) DeepCopy() *ReleaseSpec {
if in == nil {
return nil
}
out := new(ReleaseSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReleaseStatus) DeepCopyInto(out *ReleaseStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReleaseStatus.
func (in *ReleaseStatus) DeepCopy() *ReleaseStatus {
if in == nil {
return nil
}
out := new(ReleaseStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SnowBundle) DeepCopyInto(out *SnowBundle) {
*out = *in
in.Manager.DeepCopyInto(&out.Manager)
in.KubeVip.DeepCopyInto(&out.KubeVip)
out.Components = in.Components
out.Metadata = in.Metadata
in.BottlerocketBootstrapSnow.DeepCopyInto(&out.BottlerocketBootstrapSnow)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnowBundle.
func (in *SnowBundle) DeepCopy() *SnowBundle {
if in == nil {
return nil
}
out := new(SnowBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TinkBundle) DeepCopyInto(out *TinkBundle) {
*out = *in
in.TinkController.DeepCopyInto(&out.TinkController)
in.TinkServer.DeepCopyInto(&out.TinkServer)
in.TinkWorker.DeepCopyInto(&out.TinkWorker)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TinkBundle.
func (in *TinkBundle) DeepCopy() *TinkBundle {
if in == nil {
return nil
}
out := new(TinkBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TinkerbellBundle) DeepCopyInto(out *TinkerbellBundle) {
*out = *in
in.ClusterAPIController.DeepCopyInto(&out.ClusterAPIController)
in.KubeVip.DeepCopyInto(&out.KubeVip)
in.Envoy.DeepCopyInto(&out.Envoy)
out.Components = in.Components
out.Metadata = in.Metadata
out.ClusterTemplate = in.ClusterTemplate
in.TinkerbellStack.DeepCopyInto(&out.TinkerbellStack)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TinkerbellBundle.
func (in *TinkerbellBundle) DeepCopy() *TinkerbellBundle {
if in == nil {
return nil
}
out := new(TinkerbellBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TinkerbellStackBundle) DeepCopyInto(out *TinkerbellStackBundle) {
*out = *in
in.Actions.DeepCopyInto(&out.Actions)
in.Boots.DeepCopyInto(&out.Boots)
in.Hegel.DeepCopyInto(&out.Hegel)
in.TinkebellChart.DeepCopyInto(&out.TinkebellChart)
in.Hook.DeepCopyInto(&out.Hook)
in.Rufio.DeepCopyInto(&out.Rufio)
in.Tink.DeepCopyInto(&out.Tink)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TinkerbellStackBundle.
func (in *TinkerbellStackBundle) DeepCopy() *TinkerbellStackBundle {
if in == nil {
return nil
}
out := new(TinkerbellStackBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VSphereBundle) DeepCopyInto(out *VSphereBundle) {
*out = *in
in.ClusterAPIController.DeepCopyInto(&out.ClusterAPIController)
in.KubeProxy.DeepCopyInto(&out.KubeProxy)
in.Manager.DeepCopyInto(&out.Manager)
in.KubeVip.DeepCopyInto(&out.KubeVip)
out.Components = in.Components
out.Metadata = in.Metadata
out.ClusterTemplate = in.ClusterTemplate
if in.Driver != nil {
in, out := &in.Driver, &out.Driver
*out = new(Image)
(*in).DeepCopyInto(*out)
}
if in.Syncer != nil {
in, out := &in.Syncer, &out.Syncer
*out = new(Image)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereBundle.
func (in *VSphereBundle) DeepCopy() *VSphereBundle {
if in == nil {
return nil
}
out := new(VSphereBundle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VersionsBundle) DeepCopyInto(out *VersionsBundle) {
*out = *in
in.EksD.DeepCopyInto(&out.EksD)
in.CertManager.DeepCopyInto(&out.CertManager)
in.ClusterAPI.DeepCopyInto(&out.ClusterAPI)
in.Bootstrap.DeepCopyInto(&out.Bootstrap)
in.ControlPlane.DeepCopyInto(&out.ControlPlane)
in.VSphere.DeepCopyInto(&out.VSphere)
in.CloudStack.DeepCopyInto(&out.CloudStack)
in.Docker.DeepCopyInto(&out.Docker)
in.Eksa.DeepCopyInto(&out.Eksa)
in.Cilium.DeepCopyInto(&out.Cilium)
out.Kindnetd = in.Kindnetd
in.Flux.DeepCopyInto(&out.Flux)
in.PackageController.DeepCopyInto(&out.PackageController)
in.BottleRocketHostContainers.DeepCopyInto(&out.BottleRocketHostContainers)
in.ExternalEtcdBootstrap.DeepCopyInto(&out.ExternalEtcdBootstrap)
in.ExternalEtcdController.DeepCopyInto(&out.ExternalEtcdController)
in.Tinkerbell.DeepCopyInto(&out.Tinkerbell)
in.Haproxy.DeepCopyInto(&out.Haproxy)
in.Snow.DeepCopyInto(&out.Snow)
in.Nutanix.DeepCopyInto(&out.Nutanix)
if in.Aws != nil {
in, out := &in.Aws, &out.Aws
*out = new(AwsBundle)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionsBundle.
func (in *VersionsBundle) DeepCopy() *VersionsBundle {
if in == nil {
return nil
}
out := new(VersionsBundle)
in.DeepCopyInto(out)
return out
}
| 1,016 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/spf13/cobra"
"github.com/spf13/viper"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/release/pkg/aws/s3"
"github.com/aws/eks-anywhere/release/pkg/bundles"
"github.com/aws/eks-anywhere/release/pkg/clients"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/filereader"
"github.com/aws/eks-anywhere/release/pkg/operations"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
artifactutils "github.com/aws/eks-anywhere/release/pkg/util/artifacts"
releaseutils "github.com/aws/eks-anywhere/release/pkg/util/release"
)
var (
bundleReleaseManifestFile = "/bundle-release.yaml"
eksAReleaseManifestFile = "/eks-a-release.yaml"
)
// releaseCmd represents the release command.
var releaseCmd = &cobra.Command{
Use: "release",
Short: "Cut an eks-anywhere release",
PreRun: func(cmd *cobra.Command, args []string) {
err := viper.BindPFlags(cmd.Flags())
if err != nil {
fmt.Printf("Error initializing flags: %v\n", err)
os.Exit(1)
}
},
Run: func(cmd *cobra.Command, args []string) {
// TODO validation on these flags
releaseVersion := viper.GetString("release-version")
bundleNumber := viper.GetInt("bundle-number")
cliMinVersion := viper.GetString("min-version")
cliMaxVersion := viper.GetString("max-version")
releaseNumber := viper.GetInt("release-number")
cliRepoDir := viper.GetString("cli-repo-source")
buildRepoDir := viper.GetString("build-repo-source")
cliRepoUrl := viper.GetString("cli-repo-url")
buildRepoUrl := viper.GetString("build-repo-url")
buildRepoBranchName := viper.GetString("build-repo-branch-name")
cliRepoBranchName := viper.GetString("cli-repo-branch-name")
artifactDir := viper.GetString("artifact-dir")
sourceBucket := viper.GetString("source-bucket")
releaseBucket := viper.GetString("release-bucket")
sourceContainerRegistry := viper.GetString("source-container-registry")
releaseContainerRegistry := viper.GetString("release-container-registry")
cdn := viper.GetString("cdn")
devRelease := viper.GetBool("dev-release")
dryRun := viper.GetBool("dry-run")
weekly := viper.GetBool("weekly")
releaseTime := time.Now().UTC()
releaseDate := releaseTime.Format(constants.YYYYMMDD)
var bundleRelease bool
var releaseEnvironment string
if !devRelease {
bundleRelease = viper.GetBool("bundle-release")
releaseEnvironment = viper.GetString("release-environment")
}
if bundleRelease {
releaseVersion = cliMaxVersion
}
releaseConfig := &releasetypes.ReleaseConfig{
CliRepoSource: cliRepoDir,
BuildRepoSource: buildRepoDir,
CliRepoUrl: cliRepoUrl,
BuildRepoUrl: buildRepoUrl,
BuildRepoBranchName: buildRepoBranchName,
CliRepoBranchName: cliRepoBranchName,
ArtifactDir: artifactDir,
SourceBucket: sourceBucket,
ReleaseBucket: releaseBucket,
SourceContainerRegistry: sourceContainerRegistry,
ReleaseContainerRegistry: releaseContainerRegistry,
CDN: cdn,
BundleNumber: bundleNumber,
ReleaseNumber: releaseNumber,
ReleaseVersion: releaseVersion,
ReleaseDate: releaseDate,
ReleaseTime: releaseTime,
DevRelease: devRelease,
DryRun: dryRun,
Weekly: weekly,
ReleaseEnvironment: releaseEnvironment,
}
err := operations.SetRepoHeads(releaseConfig)
if err != nil {
fmt.Printf("Error getting heads of code repositories: %v\n", err)
os.Exit(1)
}
var sourceClients *clients.SourceClients
var releaseClients *clients.ReleaseClients
if devRelease {
sourceClients, releaseClients, err = clients.CreateDevReleaseClients(dryRun)
if err != nil {
fmt.Printf("Error creating clients: %v\n", err)
os.Exit(1)
}
fmt.Printf("%s Successfully created dev release clients\n", constants.SuccessIcon)
}
if releaseEnvironment == "development" {
sourceClients, releaseClients, err = clients.CreateStagingReleaseClients()
if err != nil {
fmt.Printf("Error creating clients: %v\n", err)
os.Exit(1)
}
fmt.Printf("%s Successfully created staging release clients\n", constants.SuccessIcon)
}
if releaseEnvironment == "production" {
sourceClients, releaseClients, err = clients.CreateProdReleaseClients()
if err != nil {
fmt.Printf("Error creating clients: %v\n", err)
os.Exit(1)
}
fmt.Printf("%s Successfully created dev release clients\n", constants.SuccessIcon)
}
releaseConfig.SourceClients = sourceClients
releaseConfig.ReleaseClients = releaseClients
if devRelease {
buildNumber, err := filereader.GetNextEksADevBuildNumber(releaseVersion, releaseConfig)
if err != nil {
fmt.Printf("Error getting previous EKS-A dev release number: %v\n", err)
os.Exit(1)
}
releaseVersion, err = filereader.GetCurrentEksADevReleaseVersion(releaseVersion, releaseConfig, buildNumber)
if err != nil {
fmt.Printf("Error getting previous EKS-A dev release number: %v\n", err)
os.Exit(1)
}
releaseConfig.BundleNumber = buildNumber
releaseConfig.ReleaseVersion = releaseVersion
}
releaseConfig.DevReleaseUriVersion = strings.ReplaceAll(releaseVersion, "+", "-")
if devRelease || bundleRelease {
bundle := bundles.NewBaseBundles(releaseConfig)
bundle.Spec.CliMinVersion = cliMinVersion
bundle.Spec.CliMaxVersion = cliMaxVersion
bundleArtifactsTable, err := operations.GenerateBundleArtifactsTable(releaseConfig)
if err != nil {
fmt.Printf("Error getting bundle artifacts data: %v\n", err)
os.Exit(1)
}
releaseConfig.BundleArtifactsTable = bundleArtifactsTable
// Download ECR images + S3 artifacts and rename them to the
// proper release URIs + Upload them to release destinations
err = operations.BundleArtifactsRelease(releaseConfig)
if err != nil {
fmt.Printf("Error releasing bundle artifacts: %v\n", err)
os.Exit(1)
}
imageDigests, err := operations.GenerateImageDigestsTable(releaseConfig)
if err != nil {
fmt.Printf("Error generating image digests table: %+v\n", err)
os.Exit(1)
}
err = operations.GenerateBundleSpec(releaseConfig, bundle, imageDigests)
if err != nil {
fmt.Printf("Error generating bundles manifest: %+v\n", err)
os.Exit(1)
}
bundleManifest, err := yaml.Marshal(bundle)
if err != nil {
fmt.Printf("Error marshaling bundles manifest: %+v\n", err)
os.Exit(1)
}
fmt.Printf("\n%s\n", string(bundleManifest))
if !dryRun {
err = os.WriteFile(bundleReleaseManifestFile, bundleManifest, 0o644)
if err != nil {
fmt.Printf("Error writing bundles manifest file to disk: %v\n", err)
os.Exit(1)
}
bundleReleaseManifestKey := artifactutils.GetManifestFilepaths(releaseConfig.DevRelease, releaseConfig.Weekly, releaseConfig.BundleNumber, constants.BundlesKind, releaseConfig.BuildRepoBranchName, releaseConfig.ReleaseDate)
err = s3.UploadFile(bundleReleaseManifestFile, aws.String(releaseConfig.ReleaseBucket), aws.String(bundleReleaseManifestKey), releaseConfig.ReleaseClients.S3.Uploader)
if err != nil {
fmt.Printf("Error uploading bundle manifest to release bucket: %+v", err)
os.Exit(1)
}
fmt.Printf("%s Successfully completed bundle release\n", constants.SuccessIcon)
}
}
if devRelease || !bundleRelease {
release, err := releaseutils.GetPreviousReleaseIfExists(releaseConfig)
if err != nil {
fmt.Printf("Error getting previous EKS-A releases: %v\n", err)
os.Exit(1)
}
release.Name = "eks-anywhere"
release.APIVersion = "anywhere.eks.amazonaws.com/v1alpha1"
release.Kind = constants.ReleaseKind
release.CreationTimestamp = v1.Time{Time: releaseTime}
release.Spec.LatestVersion = releaseVersion
eksAArtifactsTable, err := operations.GenerateEksAArtifactsTable(releaseConfig)
if err != nil {
fmt.Printf("Error getting EKS-A artifacts data: %v\n", err)
os.Exit(1)
}
releaseConfig.EksAArtifactsTable = eksAArtifactsTable
err = operations.EksAArtifactsRelease(releaseConfig)
if err != nil {
fmt.Printf("Error releasing EKS-A CLI artifacts: %v\n", err)
os.Exit(1)
}
currentEksARelease, err := bundles.GetEksARelease(releaseConfig)
if err != nil {
fmt.Printf("Error getting EKS-A release: %v\n", err)
os.Exit(1)
}
currentEksAReleaseYaml, err := yaml.Marshal(currentEksARelease)
if err != nil {
fmt.Printf("Error marshaling EKS-A releases manifest: %v\n", err)
os.Exit(1)
}
fmt.Printf("\n%s\n", string(currentEksAReleaseYaml))
if dryRun {
fmt.Printf("%s Successfully completed dry-run of release process\n", constants.SuccessIcon)
os.Exit(0)
}
previousReleases := releaseutils.EksAReleases(release.Spec.Releases)
release.Spec.Releases = previousReleases.AppendOrUpdateRelease(currentEksARelease)
releaseManifest, err := yaml.Marshal(release)
if err != nil {
fmt.Printf("Error marshaling EKS-A releases manifest: %v\n", err)
os.Exit(1)
}
// Push the manifest file and other artifacts to release locations
err = os.WriteFile(eksAReleaseManifestFile, releaseManifest, 0o644)
if err != nil {
fmt.Printf("Error writing EKS-A release manifest file to disk: %v\n", err)
os.Exit(1)
}
eksAReleaseManifestKey := artifactutils.GetManifestFilepaths(releaseConfig.DevRelease, releaseConfig.Weekly, releaseConfig.BundleNumber, constants.ReleaseKind, releaseConfig.BuildRepoBranchName, releaseConfig.ReleaseDate)
err = s3.UploadFile(eksAReleaseManifestFile, aws.String(releaseConfig.ReleaseBucket), aws.String(eksAReleaseManifestKey), releaseConfig.ReleaseClients.S3.Uploader)
if err != nil {
fmt.Printf("Error uploading EKS-A release manifest to release bucket: %v", err)
os.Exit(1)
}
if !weekly {
err = filereader.PutEksAReleaseVersion(releaseVersion, releaseConfig)
if err != nil {
fmt.Printf("Error uploading latest EKS-A release version to S3: %v\n", err)
os.Exit(1)
}
}
fmt.Printf("%s Successfully completed EKS-A release\n", constants.SuccessIcon)
}
},
}
func init() {
rootCmd.AddCommand(releaseCmd)
releaseCmd.Flags().String("release-version", "vDev", "The version of eks-a")
releaseCmd.Flags().Int("bundle-number", 1, "The bundle version number")
releaseCmd.Flags().String("min-version", "v0.0.0", "The minimum version of eks-a supported by dependency bundles")
releaseCmd.Flags().String("max-version", "v0.0.0", "The maximum version of eks-a supported by dependency bundles")
releaseCmd.Flags().Int("release-number", 1, "The release-number to create")
releaseCmd.Flags().String("cli-repo-url", "", "URL to clone the eks-anywhere repo")
releaseCmd.Flags().String("build-repo-url", "", "URL to clone the eks-anywhere-build-tooling repo")
releaseCmd.Flags().String("cli-repo-source", "", "The eks-anywhere-cli source")
releaseCmd.Flags().String("build-repo-source", "", "The eks-anywhere-build-tooling source")
releaseCmd.Flags().String("build-repo-branch-name", "main", "The branch name to build bundles from")
releaseCmd.Flags().String("cli-repo-branch-name", "main", "The branch name to build EKS-A CLI from")
releaseCmd.Flags().String("artifact-dir", "downloaded-artifacts", "The base directory for artifacts")
releaseCmd.Flags().String("cdn", "https://anywhere.eks.amazonaws.com", "The URL base for artifacts")
releaseCmd.Flags().String("source-bucket", "eks-a-source-bucket", "The bucket name where the built/staging artifacts are located to download")
releaseCmd.Flags().String("release-bucket", "eks-a-release-bucket", "The bucket name where released artifacts live")
releaseCmd.Flags().String("source-container-registry", "", "The container registry to pull images from for a dev release")
releaseCmd.Flags().String("release-container-registry", "", "The container registry that images wll be pushed to")
releaseCmd.Flags().Bool("dev-release", true, "Flag to indicate a dev release")
releaseCmd.Flags().Bool("bundle-release", true, "Flag to indicate a bundle release")
releaseCmd.Flags().String("release-environment", "", "Release environment")
releaseCmd.Flags().Bool("dry-run", false, "Flag to indicate if the release is a dry run")
releaseCmd.Flags().Bool("weekly", false, "Flag to indicate a weekly bundle release")
}
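// Illustrative invocation added for clarity (not part of the original source).
// Bucket, registry, and version values below are placeholders; the flag names
// match the definitions in init above.
//
//	eks-anywhere-release release \
//	  --dev-release=false \
//	  --bundle-release=true \
//	  --bundle-number 42 \
//	  --min-version v0.16.0 \
//	  --max-version v0.17.0 \
//	  --release-environment development \
//	  --source-bucket my-staging-bucket \
//	  --release-bucket my-release-bucket \
//	  --source-container-registry 123456789012.dkr.ecr.us-west-2.amazonaws.com \
//	  --release-container-registry public.ecr.aws/example \
//	  --dry-run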
| 331 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"os"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var cfgFile string
// rootCmd represents the base command when called without any subcommands.
var rootCmd = &cobra.Command{
Use: "eks-anywhere-release",
Short: "A release tool for EKS Anywhere",
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func init() {
cobra.OnInitialize(initConfig)
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.eks-anywhere.yaml)")
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := homedir.Dir()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// Search config in home directory with name ".eks-anywhere" (without extension).
viper.AddConfigPath(home)
viper.SetConfigName(".eks-anywhere")
}
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
fmt.Println("Using config file:", viper.ConfigFileUsed())
}
}
| 77 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package assets
import (
"fmt"
"path/filepath"
"strconv"
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/release/pkg/assets/archives"
assetconfig "github.com/aws/eks-anywhere/release/pkg/assets/config"
"github.com/aws/eks-anywhere/release/pkg/assets/images"
"github.com/aws/eks-anywhere/release/pkg/assets/manifests"
"github.com/aws/eks-anywhere/release/pkg/assets/tagger"
assettypes "github.com/aws/eks-anywhere/release/pkg/assets/types"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
sliceutils "github.com/aws/eks-anywhere/release/pkg/util/slices"
)
func getAssetsFromConfig(ac *assettypes.AssetConfig, rc *releasetypes.ReleaseConfig, eksDReleaseChannel, eksDReleaseNumber, kubeVersion string) ([]releasetypes.Artifact, error) {
var artifacts []releasetypes.Artifact
var imageTagOverrides []releasetypes.ImageTagOverride
projectName := ac.ProjectName
projectPath := ac.ProjectPath
sourcedFromBranch := rc.BuildRepoBranchName
gitTagPath := projectPath
if ac.HasSeparateTagPerReleaseBranch {
gitTagPath = filepath.Join(projectPath, eksDReleaseChannel)
}
// Get git tag for project if exists
gitTag, err := tagger.GetGitTagAssigner(ac)(rc, gitTagPath, sourcedFromBranch)
if err != nil {
return nil, fmt.Errorf("error getting git tag for project %s: %v", projectName, err)
}
// Add project images to artifacts list
for _, image := range ac.Images {
imageArtifact, sourceRepoName, err := images.GetImageAssets(rc, ac, image, ac.ImageRepoPrefix, ac.ImageTagOptions, gitTag, projectPath, gitTagPath, eksDReleaseChannel, eksDReleaseNumber, kubeVersion)
if err != nil {
return nil, fmt.Errorf("error getting image artifact: %v", err)
}
artifacts = append(artifacts, releasetypes.Artifact{Image: imageArtifact})
imageTagOverrides = append(imageTagOverrides, releasetypes.ImageTagOverride{
Repository: sourceRepoName,
ReleaseUri: imageArtifact.ReleaseImageURI,
})
if ac.UsesKubeRbacProxy {
kubeRbacProxyImageTagOverride, err := bundleutils.GetKubeRbacProxyImageTagOverride(rc)
if err != nil {
return nil, fmt.Errorf("error getting kube-rbac-proxy image tag override: %v", err)
}
imageTagOverrides = append(imageTagOverrides, kubeRbacProxyImageTagOverride)
}
}
// Add manifests to artifacts list
for _, manifestComponent := range ac.Manifests {
for _, manifestFile := range manifestComponent.ManifestFiles {
manifestArtifact, err := manifests.GetManifestAssets(rc, manifestComponent, manifestFile, projectName, projectPath, gitTag, sourcedFromBranch, imageTagOverrides)
if err != nil {
return nil, fmt.Errorf("error getting manifest artifact: %v", err)
}
artifacts = append(artifacts, releasetypes.Artifact{Manifest: manifestArtifact})
}
}
// Add archives to artifacts list
for _, archive := range ac.Archives {
archiveArtifact, err := archives.GetArchiveAssets(rc, archive, projectPath, gitTag, eksDReleaseChannel, eksDReleaseNumber, kubeVersion)
if err != nil {
return nil, fmt.Errorf("error getting archive artifact: %v", err)
}
artifacts = append(artifacts, releasetypes.Artifact{Archive: archiveArtifact})
}
return artifacts, nil
}
func GetBundleReleaseAssets(supportedK8sVersions []string, eksDReleaseMap *filereader.EksDLatestReleases, rc *releasetypes.ReleaseConfig) (map[string][]releasetypes.Artifact, error) {
artifactsTable := map[string][]releasetypes.Artifact{}
assetConfigs := assetconfig.GetBundleReleaseAssetsConfigMap()
for _, release := range eksDReleaseMap.Releases {
channel := release.Branch
number := strconv.Itoa(release.Number)
kubeVersion := release.KubeVersion
if !sliceutils.SliceContains(supportedK8sVersions, channel) {
continue
}
for _, assetConfig := range assetConfigs {
if !rc.DevRelease && assetConfig.OnlyForDevRelease {
continue
}
projectName := assetConfig.ProjectName
if assetConfig.HasReleaseBranches {
projectName = fmt.Sprintf("%s-%s", projectName, channel)
}
artifactsList, err := getAssetsFromConfig(&assetConfig, rc, channel, number, kubeVersion)
if err != nil {
				return nil, errors.Wrapf(err, "error getting artifacts for project %s", projectName)
}
artifactsTable[projectName] = artifactsList
}
}
return artifactsTable, nil
}
| 130 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package archives
import (
"fmt"
"path/filepath"
"github.com/pkg/errors"
assettypes "github.com/aws/eks-anywhere/release/pkg/assets/types"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
artifactutils "github.com/aws/eks-anywhere/release/pkg/util/artifacts"
sliceutils "github.com/aws/eks-anywhere/release/pkg/util/slices"
)
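// EksDistroArtifactPathGetter returns the source S3 key and prefix along with the release name
// and release S3 path for EKS-D based OS image archives (AMI, OVA and raw formats).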
func EksDistroArtifactPathGetter(rc *releasetypes.ReleaseConfig, archive *assettypes.Archive, projectPath, gitTag, eksDReleaseChannel, eksDReleaseNumber, kubeVersion, latestPath, arch string) (string, string, string, string, error) {
var sourceS3Key string
var sourceS3Prefix string
var releaseS3Path string
var releaseName string
bottlerocketSupportedK8sVersions, err := filereader.GetBottlerocketSupportedK8sVersionsByFormat(rc, archive.Format)
if err != nil {
return "", "", "", "", errors.Cause(err)
}
if archive.OSName == "bottlerocket" && !sliceutils.SliceContains(bottlerocketSupportedK8sVersions, eksDReleaseChannel) {
return "", "", "", "", nil
}
imageExtensions := map[string]string{
"ami": "gz",
"ova": "ova",
"raw": "gz",
}
imageExtension := imageExtensions[archive.Format]
if archive.OSName == "bottlerocket" && (archive.Format == "ami" || archive.Format == "raw") {
imageExtension = "img.gz"
}
if rc.DevRelease || rc.ReleaseEnvironment == "development" {
sourceS3Key = fmt.Sprintf("%s.%s", archive.OSName, imageExtension)
sourceS3Prefix = fmt.Sprintf("%s/%s/%s/%s/%s", projectPath, eksDReleaseChannel, archive.Format, archive.OSName, latestPath)
} else {
sourceS3Key = fmt.Sprintf("%s-%s-eks-d-%s-%s-eks-a-%d-%s.%s",
archive.OSName,
kubeVersion,
eksDReleaseChannel,
eksDReleaseNumber,
rc.BundleNumber,
arch,
imageExtension,
)
sourceS3Prefix = fmt.Sprintf("releases/bundles/%d/artifacts/%s/%s", rc.BundleNumber, archive.Format, eksDReleaseChannel)
}
if rc.DevRelease {
releaseName = fmt.Sprintf("%s-%s-eks-d-%s-%s-eks-a-%s-%s.%s",
archive.OSName,
kubeVersion,
eksDReleaseChannel,
eksDReleaseNumber,
rc.DevReleaseUriVersion,
arch,
imageExtension,
)
releaseS3Path = fmt.Sprintf("artifacts/%s/eks-distro/%s/%s/%s-%s",
rc.DevReleaseUriVersion,
archive.Format,
eksDReleaseChannel,
eksDReleaseChannel,
eksDReleaseNumber,
)
} else {
releaseName = fmt.Sprintf("%s-%s-eks-d-%s-%s-eks-a-%d-%s.%s",
archive.OSName,
kubeVersion,
eksDReleaseChannel,
eksDReleaseNumber,
rc.BundleNumber,
arch,
imageExtension,
)
releaseS3Path = fmt.Sprintf("releases/bundles/%d/artifacts/%s/%s", rc.BundleNumber, archive.Format, eksDReleaseChannel)
}
return sourceS3Key, sourceS3Prefix, releaseName, releaseS3Path, nil
}
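// TarballArtifactPathGetter returns the source S3 key and prefix along with the release name
// and release S3 path for tarball archives.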
func TarballArtifactPathGetter(rc *releasetypes.ReleaseConfig, archive *assettypes.Archive, projectPath, gitTag, eksDReleaseChannel, eksDReleaseNumber, kubeVersion, latestPath, arch string) (string, string, string, string, error) {
os := "linux"
var sourceS3Key string
var sourceS3Prefix string
var releaseS3Path string
var releaseName string
if rc.DevRelease || rc.ReleaseEnvironment == "development" {
sourceS3Key = fmt.Sprintf("%s-%s-%s-%s.tar.gz", archive.Name, os, arch, gitTag)
sourceS3Prefix = fmt.Sprintf("%s/%s", projectPath, latestPath)
} else {
sourceS3Key = fmt.Sprintf("%s-%s-%s.tar.gz", archive.Name, os, arch)
sourceS3Prefix = fmt.Sprintf("releases/bundles/%d/artifacts/%s/%s", rc.BundleNumber, archive.Name, gitTag)
}
if rc.DevRelease {
releaseName = fmt.Sprintf("%s-%s-%s-%s.tar.gz", archive.Name, rc.DevReleaseUriVersion, os, arch)
releaseS3Path = fmt.Sprintf("artifacts/%s/%s/%s", rc.DevReleaseUriVersion, archive.Name, gitTag)
} else {
releaseName = fmt.Sprintf("%s-%s-%s.tar.gz", archive.Name, os, arch)
releaseS3Path = fmt.Sprintf("releases/bundles/%d/artifacts/%s/%s", rc.BundleNumber, archive.Name, gitTag)
}
return sourceS3Key, sourceS3Prefix, releaseName, releaseS3Path, nil
}
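// KernelArtifactPathGetter returns the source S3 key and prefix along with the release name
// and release S3 path for hook kernel and initramfs artifacts.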
func KernelArtifactPathGetter(rc *releasetypes.ReleaseConfig, archive *assettypes.Archive, projectPath, gitTag, eksDReleaseChannel, eksDReleaseNumber, kubeVersion, latestPath, arch string) (string, string, string, string, error) {
var sourceS3Prefix string
var releaseS3Path string
sourceS3Key, releaseName := archive.Name, archive.Name
if rc.DevRelease || rc.ReleaseEnvironment == "development" {
sourceS3Prefix = fmt.Sprintf("%s/%s/%s", projectPath, latestPath, gitTag)
} else {
sourceS3Prefix = fmt.Sprintf("releases/bundles/%d/artifacts/hook/%s", rc.BundleNumber, gitTag)
}
if rc.DevRelease {
releaseS3Path = fmt.Sprintf("artifacts/%s/hook/%s", rc.DevReleaseUriVersion, gitTag)
} else {
releaseS3Path = fmt.Sprintf("releases/bundles/%d/artifacts/hook/%s", rc.BundleNumber, gitTag)
}
return sourceS3Key, sourceS3Prefix, releaseName, releaseS3Path, nil
}
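// GetArchiveAssets constructs the archive artifact for a project, resolving its source and
// release S3 locations as well as the CDN URI it will be published under.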
func GetArchiveAssets(rc *releasetypes.ReleaseConfig, archive *assettypes.Archive, projectPath, gitTag, eksDReleaseChannel, eksDReleaseNumber, kubeVersion string) (*releasetypes.ArchiveArtifact, error) {
os := "linux"
arch := "amd64"
if archive.ArchitectureOverride != "" {
arch = archive.ArchitectureOverride
}
sourcedFromBranch := rc.BuildRepoBranchName
latestPath := artifactutils.GetLatestUploadDestination(sourcedFromBranch)
sourceS3Key, sourceS3Prefix, releaseName, releaseS3Path, err := getArtifactPathGenerator(archive)(rc, archive, projectPath, gitTag, eksDReleaseChannel, eksDReleaseNumber, kubeVersion, latestPath, arch)
if err != nil {
return nil, errors.Cause(err)
}
if sourceS3Key == "" && err == nil {
return nil, nil
}
cdnURI, err := artifactutils.GetURI(rc.CDN, filepath.Join(releaseS3Path, releaseName))
if err != nil {
return nil, errors.Cause(err)
}
archiveArtifact := &releasetypes.ArchiveArtifact{
SourceS3Key: sourceS3Key,
SourceS3Prefix: sourceS3Prefix,
ArtifactPath: filepath.Join(rc.ArtifactDir, fmt.Sprintf("%s-%s", archive.Name, archive.Format), eksDReleaseChannel, rc.BuildRepoHead),
ReleaseName: releaseName,
ReleaseS3Path: releaseS3Path,
ReleaseCdnURI: cdnURI,
OS: os,
OSName: archive.OSName,
Arch: []string{arch},
GitTag: gitTag,
ProjectPath: projectPath,
SourcedFromBranch: sourcedFromBranch,
ImageFormat: archive.Format,
}
return archiveArtifact, nil
}
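// getArtifactPathGenerator returns the archive's custom S3 path getter if one is configured,
// falling back to the tarball path getter otherwise.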
func getArtifactPathGenerator(archive *assettypes.Archive) assettypes.ArchiveS3PathGenerator {
if archive.ArchiveS3PathGetter != nil {
return assettypes.ArchiveS3PathGenerator(archive.ArchiveS3PathGetter)
}
return assettypes.ArchiveS3PathGenerator(TarballArtifactPathGetter)
}
| 196 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package archives
import (
"reflect"
"strings"
"testing"
"time"
assettypes "github.com/aws/eks-anywhere/release/pkg/assets/types"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
var releaseConfig = &releasetypes.ReleaseConfig{
ArtifactDir: "artifacts",
CliRepoSource: "eks-a-build",
BuildRepoSource: "eks-a-cli",
CliRepoBranchName: "main",
CliRepoUrl: "https://github.com/aws/eks-anywhere.git",
BuildRepoUrl: "https://github.com/aws/eks-anywhere-build-tooling.git",
SourceBucket: "projectbuildpipeline-857-pipelineoutputartifactsb-10ajmk30khe3f",
ReleaseBucket: "release-bucket",
SourceContainerRegistry: "source-container-registry",
ReleaseContainerRegistry: "release-container-registry",
CDN: "https://release-bucket",
BundleNumber: 1,
ReleaseNumber: 1,
ReleaseVersion: "vDev",
ReleaseTime: time.Unix(0, 0),
DevRelease: true,
DryRun: true,
}
func TestGenerateArchiveAssets(t *testing.T) {
testCases := []struct {
testName string
archive *assettypes.Archive
buildRepoBranchName string
projectPath string
gitTag string
eksDReleaseChannel string
eksDReleaseNumber string
kubeVersion string
wantArchiveArtifact *releasetypes.ArchiveArtifact
wantErr bool
}{
{
testName: "Tarball archive for project foo/bar from main",
buildRepoBranchName: "main",
projectPath: "projects/foo/bar",
gitTag: "v0.1.0",
eksDReleaseChannel: "1-21",
eksDReleaseNumber: "8",
kubeVersion: "1.21.9",
archive: &assettypes.Archive{
Name: "baz",
Format: "tarball",
},
wantArchiveArtifact: &releasetypes.ArchiveArtifact{
SourceS3Key: "baz-linux-amd64-v0.1.0.tar.gz",
SourceS3Prefix: "projects/foo/bar/latest",
ArtifactPath: "artifacts/baz-tarball/1-21",
ReleaseName: "baz-v0.0.0-dev-build.0-linux-amd64.tar.gz",
ReleaseS3Path: "artifacts/v0.0.0-dev-build.0/baz/v0.1.0",
ReleaseCdnURI: "https://release-bucket/artifacts/v0.0.0-dev-build.0/baz/v0.1.0/baz-v0.0.0-dev-build.0-linux-amd64.tar.gz",
OS: "linux",
Arch: []string{"amd64"},
GitTag: "v0.1.0",
ProjectPath: "projects/foo/bar",
SourcedFromBranch: "main",
ImageFormat: "tarball",
},
wantErr: false,
},
{
testName: "Tarball archive for project foo/bar from release-branch",
buildRepoBranchName: "release-branch",
projectPath: "projects/foo/bar",
gitTag: "v0.2.0",
eksDReleaseChannel: "1-22",
eksDReleaseNumber: "6",
kubeVersion: "1.22.6",
archive: &assettypes.Archive{
Name: "baz",
Format: "tarball",
},
wantArchiveArtifact: &releasetypes.ArchiveArtifact{
SourceS3Key: "baz-linux-amd64-v0.2.0.tar.gz",
SourceS3Prefix: "projects/foo/bar/release-branch",
ArtifactPath: "artifacts/baz-tarball/1-22",
ReleaseName: "baz-v0.0.0-dev-release-branch-build.0-linux-amd64.tar.gz",
ReleaseS3Path: "artifacts/v0.0.0-dev-release-branch-build.0/baz/v0.2.0",
ReleaseCdnURI: "https://release-bucket/artifacts/v0.0.0-dev-release-branch-build.0/baz/v0.2.0/baz-v0.0.0-dev-release-branch-build.0-linux-amd64.tar.gz",
OS: "linux",
Arch: []string{"amd64"},
GitTag: "v0.2.0",
ProjectPath: "projects/foo/bar",
SourcedFromBranch: "release-branch",
ImageFormat: "tarball",
},
wantErr: false,
},
{
testName: "OS image archive for project foo/bar from main",
buildRepoBranchName: "main",
projectPath: "projects/foo/bar",
gitTag: "v0.1.0",
eksDReleaseChannel: "1-21",
eksDReleaseNumber: "8",
kubeVersion: "1.21.9",
archive: &assettypes.Archive{
Name: "baz",
OSName: "lorem",
Format: "ova",
ArchiveS3PathGetter: EksDistroArtifactPathGetter,
},
wantArchiveArtifact: &releasetypes.ArchiveArtifact{
SourceS3Key: "lorem.ova",
SourceS3Prefix: "projects/foo/bar/1-21/ova/lorem/latest",
ArtifactPath: "artifacts/baz-ova/1-21",
ReleaseName: "lorem-1.21.9-eks-d-1-21-8-eks-a-v0.0.0-dev-build.0-amd64.ova",
ReleaseS3Path: "artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-21/1-21-8",
ReleaseCdnURI: "https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-21/1-21-8/lorem-1.21.9-eks-d-1-21-8-eks-a-v0.0.0-dev-build.0-amd64.ova",
OS: "linux",
OSName: "lorem",
Arch: []string{"amd64"},
GitTag: "v0.1.0",
ProjectPath: "projects/foo/bar",
SourcedFromBranch: "main",
ImageFormat: "ova",
},
wantErr: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
releaseConfig.BuildRepoBranchName = tt.buildRepoBranchName
releaseVersion, err := filereader.GetCurrentEksADevReleaseVersion(releaseConfig.ReleaseVersion, releaseConfig, 0)
if err != nil {
t.Fatalf("Error getting previous EKS-A dev release number: %v\n", err)
}
releaseConfig.ReleaseVersion = releaseVersion
releaseConfig.DevReleaseUriVersion = strings.ReplaceAll(releaseVersion, "+", "-")
if gotArchiveArtifact, err := GetArchiveAssets(releaseConfig, tt.archive, tt.projectPath, tt.gitTag, tt.eksDReleaseChannel, tt.eksDReleaseNumber, tt.kubeVersion); (err != nil) != tt.wantErr {
t.Fatalf("GetArchiveAssets err = %v, want err = %v", err, tt.wantErr)
} else if !reflect.DeepEqual(gotArchiveArtifact, tt.wantArchiveArtifact) {
t.Fatalf("GetArchiveAssets got artifact = %v, expected %v", gotArchiveArtifact, tt.wantArchiveArtifact)
}
})
}
}
| 170 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"github.com/aws/eks-anywhere/release/pkg/assets/archives"
"github.com/aws/eks-anywhere/release/pkg/assets/tagger"
assettypes "github.com/aws/eks-anywhere/release/pkg/assets/types"
)
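// bundleReleaseAssetsConfigMap lists the asset configs for every project included in the bundle
// release, describing the images, manifests and archives that each project contributes.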
var bundleReleaseAssetsConfigMap = []assettypes.AssetConfig{
// Boots artifacts
{
ProjectName: "boots",
ProjectPath: "projects/tinkerbell/boots",
Images: []*assettypes.Image{
{
RepoName: "boots",
},
},
ImageRepoPrefix: "tinkerbell",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Bottlerocket-bootstrap artifacts
{
ProjectName: "bottlerocket-bootstrap",
ProjectPath: "projects/aws/bottlerocket-bootstrap",
GitTagAssigner: tagger.NonExistentTagAssigner,
Images: []*assettypes.Image{
{
RepoName: "bottlerocket-bootstrap",
ImageTagConfiguration: assettypes.ImageTagConfiguration{
NonProdSourceImageTagFormat: "v<eksDReleaseChannel>-<eksDReleaseNumber>",
ProdSourceImageTagFormat: "v<eksDReleaseChannel>-<eksDReleaseNumber>",
ReleaseImageTagFormat: "v<eksDReleaseChannel>-<eksDReleaseNumber>",
},
},
{
RepoName: "bottlerocket-bootstrap-snow",
ImageTagConfiguration: assettypes.ImageTagConfiguration{
NonProdSourceImageTagFormat: "v<eksDReleaseChannel>-<eksDReleaseNumber>",
ProdSourceImageTagFormat: "v<eksDReleaseChannel>-<eksDReleaseNumber>",
ReleaseImageTagFormat: "v<eksDReleaseChannel>-<eksDReleaseNumber>",
},
},
},
ImageTagOptions: []string{
"eksDReleaseChannel",
"eksDReleaseNumber",
"gitTag",
},
HasReleaseBranches: true,
},
// Cert-manager artifacts
{
ProjectName: "cert-manager",
ProjectPath: "projects/cert-manager/cert-manager",
Images: []*assettypes.Image{
{
RepoName: "cert-manager-acmesolver",
},
{
RepoName: "cert-manager-cainjector",
},
{
RepoName: "cert-manager-controller",
},
{
RepoName: "cert-manager-ctl",
},
{
RepoName: "cert-manager-webhook",
},
},
ImageRepoPrefix: "cert-manager",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
ManifestFiles: []string{"cert-manager.yaml"},
},
},
},
// Cilium artifacts
{
ProjectName: "cilium",
ProjectPath: "projects/cilium/cilium",
Manifests: []*assettypes.ManifestComponent{
{
Name: "cilium",
ManifestFiles: []string{"cilium.yaml"},
},
},
},
// Cloud-provider-vsphere artifacts
{
ProjectName: "cloud-provider-vsphere",
ProjectPath: "projects/kubernetes/cloud-provider-vsphere",
Images: []*assettypes.Image{
{
RepoName: "manager",
AssetName: "cloud-provider-vsphere",
ImageTagConfiguration: assettypes.ImageTagConfiguration{
NonProdSourceImageTagFormat: "<gitTag>",
ProdSourceImageTagFormat: "<gitTag>-eks-d-<eksDReleaseChannel>",
ReleaseImageTagFormat: "<gitTag>-eks-d-<eksDReleaseChannel>",
},
},
},
ImageRepoPrefix: "kubernetes/cloud-provider-vsphere/cpi",
ImageTagOptions: []string{
"eksDReleaseChannel",
"gitTag",
"projectPath",
},
HasReleaseBranches: true,
HasSeparateTagPerReleaseBranch: true,
},
// Cluster-api artifacts
{
ProjectName: "cluster-api",
ProjectPath: "projects/kubernetes-sigs/cluster-api",
Images: []*assettypes.Image{
{
RepoName: "cluster-api-controller",
},
{
RepoName: "kubeadm-bootstrap-controller",
},
{
RepoName: "kubeadm-control-plane-controller",
},
},
ImageRepoPrefix: "kubernetes-sigs/cluster-api",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
Name: "cluster-api",
ManifestFiles: []string{"core-components.yaml", "metadata.yaml"},
},
{
Name: "bootstrap-kubeadm",
ManifestFiles: []string{"bootstrap-components.yaml", "metadata.yaml"},
},
{
Name: "control-plane-kubeadm",
ManifestFiles: []string{"control-plane-components.yaml", "metadata.yaml"},
},
},
},
// Cluster-api-provider-aws-snow artifacts
{
ProjectName: "cluster-api-provider-aws-snow",
ProjectPath: "projects/aws/cluster-api-provider-aws-snow",
Images: []*assettypes.Image{
{
RepoName: "manager",
AssetName: "cluster-api-snow-controller",
},
},
ImageRepoPrefix: "aws/cluster-api-provider-aws-snow",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
Name: "infrastructure-snow",
ManifestFiles: []string{"infrastructure-components.yaml", "metadata.yaml"},
},
},
},
// Cluster-api-provider-cloudstack artifacts
{
ProjectName: "cluster-api-provider-cloudstack",
ProjectPath: "projects/kubernetes-sigs/cluster-api-provider-cloudstack",
Images: []*assettypes.Image{
{
RepoName: "manager",
AssetName: "cluster-api-provider-cloudstack",
},
},
ImageRepoPrefix: "kubernetes-sigs/cluster-api-provider-cloudstack/release",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
Name: "infrastructure-cloudstack",
ManifestFiles: []string{"infrastructure-components.yaml", "metadata.yaml"},
},
},
UsesKubeRbacProxy: true,
},
// Cluster-api-provider-docker artifacts
{
ProjectName: "cluster-api-provider-docker",
ProjectPath: "projects/kubernetes-sigs/cluster-api",
Images: []*assettypes.Image{
{
RepoName: "capd-manager",
AssetName: "cluster-api-provider-docker",
},
},
ImageRepoPrefix: "kubernetes-sigs/cluster-api",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
Name: "infrastructure-docker",
ManifestFiles: []string{"infrastructure-components-development.yaml", "cluster-template-development.yaml", "metadata.yaml"},
ReleaseManifestPrefix: "cluster-api",
},
},
},
// Cluster-api-provider-nutanix artifacts
{
ProjectName: "cluster-api-provider-nutanix",
ProjectPath: "projects/nutanix-cloud-native/cluster-api-provider-nutanix",
Images: []*assettypes.Image{
{
RepoName: "cluster-api-provider-nutanix",
},
},
ImageRepoPrefix: "nutanix-cloud-native",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
Name: "infrastructure-nutanix",
ManifestFiles: []string{"infrastructure-components.yaml", "cluster-template.yaml", "metadata.yaml"},
},
},
UsesKubeRbacProxy: true,
},
// Cluster-api-provider-tinkerbell artifacts
{
ProjectName: "cluster-api-provider-tinkerbell",
ProjectPath: "projects/tinkerbell/cluster-api-provider-tinkerbell",
Images: []*assettypes.Image{
{
RepoName: "cluster-api-provider-tinkerbell",
},
},
ImageRepoPrefix: "tinkerbell",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
Name: "infrastructure-tinkerbell",
ManifestFiles: []string{"infrastructure-components.yaml", "cluster-template.yaml", "metadata.yaml"},
},
},
},
// Cluster-api-provider-vsphere artifacts
{
ProjectName: "cluster-api-provider-vsphere",
ProjectPath: "projects/kubernetes-sigs/cluster-api-provider-vsphere",
Images: []*assettypes.Image{
{
RepoName: "manager",
AssetName: "cluster-api-provider-vsphere",
},
},
ImageRepoPrefix: "kubernetes-sigs/cluster-api-provider-vsphere/release",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
Name: "infrastructure-vsphere",
ManifestFiles: []string{"infrastructure-components.yaml", "cluster-template.yaml", "metadata.yaml"},
},
},
},
// Containerd artifacts
{
ProjectName: "containerd",
ProjectPath: "projects/containerd/containerd",
Archives: []*assettypes.Archive{
{
Name: "containerd",
Format: "tarball",
},
},
},
// Image-builder cli artifacts
{
ProjectName: "image-builder",
ProjectPath: "projects/aws/image-builder",
Archives: []*assettypes.Archive{
{
Name: "image-builder",
Format: "tarball",
},
},
},
// Cri-tools artifacts
{
ProjectName: "cri-tools",
ProjectPath: "projects/kubernetes-sigs/cri-tools",
Archives: []*assettypes.Archive{
{
Name: "cri-tools",
Format: "tarball",
},
},
},
// EKS-A CLI tools artifacts
{
ProjectName: "eks-anywhere-cli-tools",
ProjectPath: "projects/aws/eks-anywhere-build-tooling",
GitTagAssigner: tagger.CliGitTagAssigner,
Images: []*assettypes.Image{
{
RepoName: "eks-anywhere-cli-tools",
TrimEksAPrefix: true,
},
},
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// EKS-A cluster-controller artifacts
{
ProjectName: "eks-anywhere-cluster-controller",
ProjectPath: "projects/aws/eks-anywhere-cluster-controller",
GitTagAssigner: tagger.CliGitTagAssigner,
Images: []*assettypes.Image{
{
RepoName: "eks-anywhere-cluster-controller",
TrimEksAPrefix: true,
},
},
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
Name: "cluster-controller",
ManifestFiles: []string{"eksa-components.yaml"},
ReleaseManifestPrefix: "eks-anywhere",
NoVersionSuffix: true,
},
},
},
// EKS-A diagnostic collector artifacts
{
ProjectName: "eks-anywhere-diagnostic-collector",
ProjectPath: "projects/aws/eks-anywhere",
GitTagAssigner: tagger.CliGitTagAssigner,
Images: []*assettypes.Image{
{
RepoName: "eks-anywhere-diagnostic-collector",
TrimEksAPrefix: true,
},
},
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// EKS-A package controller artifacts
{
ProjectName: "eks-anywhere-packages",
ProjectPath: "projects/aws/eks-anywhere-packages",
Images: []*assettypes.Image{
{
RepoName: "eks-anywhere-packages",
},
{
RepoName: "ecr-token-refresher",
},
{
AssetName: "eks-anywhere-packages-helm",
RepoName: "eks-anywhere-packages",
TrimVersionSignifier: true,
ImageTagConfiguration: assettypes.ImageTagConfiguration{
NonProdSourceImageTagFormat: "<gitTag>",
},
},
},
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Etcdadm artifacts
{
ProjectName: "etcdadm",
ProjectPath: "projects/kubernetes-sigs/etcdadm",
Archives: []*assettypes.Archive{
{
Name: "etcdadm",
Format: "tarball",
},
},
},
// Etcdadm-bootstrap-provider artifacts
{
ProjectName: "etcdadm-bootstrap-provider",
ProjectPath: "projects/aws/etcdadm-bootstrap-provider",
Images: []*assettypes.Image{
{
RepoName: "etcdadm-bootstrap-provider",
},
},
ImageRepoPrefix: "aws",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
Name: "bootstrap-etcdadm-bootstrap",
ManifestFiles: []string{"bootstrap-components.yaml", "metadata.yaml"},
},
},
},
// Etcdadm-controller artifacts
{
ProjectName: "etcdadm-controller",
ProjectPath: "projects/aws/etcdadm-controller",
Images: []*assettypes.Image{
{
RepoName: "etcdadm-controller",
},
},
ImageRepoPrefix: "aws",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
Name: "bootstrap-etcdadm-controller",
ManifestFiles: []string{"bootstrap-components.yaml", "metadata.yaml"},
},
},
},
// HAProxy artifacts
{
ProjectName: "haproxy",
ProjectPath: "projects/kubernetes-sigs/kind",
Images: []*assettypes.Image{
{
RepoName: "haproxy",
},
},
ImageRepoPrefix: "kubernetes-sigs/kind",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Hegel artifacts
{
ProjectName: "hegel",
ProjectPath: "projects/tinkerbell/hegel",
Images: []*assettypes.Image{
{
RepoName: "hegel",
},
},
ImageRepoPrefix: "tinkerbell",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Helm-controller artifacts
{
ProjectName: "helm-controller",
ProjectPath: "projects/fluxcd/helm-controller",
Images: []*assettypes.Image{
{
RepoName: "helm-controller",
},
},
ImageRepoPrefix: "fluxcd",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Hook artifacts
{
ProjectName: "hook",
ProjectPath: "projects/tinkerbell/hook",
Images: []*assettypes.Image{
{
RepoName: "hook-bootkit",
},
{
RepoName: "hook-docker",
},
{
RepoName: "hook-kernel",
},
},
ImageRepoPrefix: "tinkerbell",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Archives: []*assettypes.Archive{
{
Name: "initramfs-aarch64",
Format: "kernel",
ArchitectureOverride: "arm64",
ArchiveS3PathGetter: archives.KernelArtifactPathGetter,
},
{
Name: "initramfs-x86_64",
Format: "kernel",
ArchiveS3PathGetter: archives.KernelArtifactPathGetter,
},
{
Name: "vmlinuz-aarch64",
Format: "kernel",
ArchitectureOverride: "arm64",
ArchiveS3PathGetter: archives.KernelArtifactPathGetter,
},
{
Name: "vmlinuz-x86_64",
Format: "kernel",
ArchiveS3PathGetter: archives.KernelArtifactPathGetter,
},
},
},
// Hub artifacts
{
ProjectName: "hub",
ProjectPath: "projects/tinkerbell/hub",
Images: []*assettypes.Image{
{
RepoName: "cexec",
},
{
RepoName: "image2disk",
},
{
RepoName: "kexec",
},
{
RepoName: "oci2disk",
},
{
RepoName: "reboot",
},
{
RepoName: "writefile",
},
},
ImageRepoPrefix: "tinkerbell/hub",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Image-builder artifacts
{
ProjectName: "image-builder",
ProjectPath: "projects/kubernetes-sigs/image-builder",
Archives: []*assettypes.Archive{
{
Name: "eks-distro",
OSName: "bottlerocket",
Format: "ami",
ArchiveS3PathGetter: archives.EksDistroArtifactPathGetter,
},
{
Name: "eks-distro",
OSName: "bottlerocket",
Format: "ova",
ArchiveS3PathGetter: archives.EksDistroArtifactPathGetter,
},
{
Name: "eks-distro",
OSName: "bottlerocket",
Format: "raw",
ArchiveS3PathGetter: archives.EksDistroArtifactPathGetter,
},
},
HasReleaseBranches: true,
},
// Kind artifacts
{
ProjectName: "kind",
ProjectPath: "projects/kubernetes-sigs/kind",
Images: []*assettypes.Image{
{
RepoName: "node",
AssetName: "kind-node",
ImageTagConfiguration: assettypes.ImageTagConfiguration{
NonProdSourceImageTagFormat: "<kubeVersion>-eks-<eksDReleaseChannel>-<eksDReleaseNumber>",
ProdSourceImageTagFormat: "<kubeVersion>-eks-d-<eksDReleaseChannel>-<eksDReleaseNumber>",
ReleaseImageTagFormat: "<kubeVersion>-eks-d-<eksDReleaseChannel>-<eksDReleaseNumber>",
},
},
},
ImageRepoPrefix: "kubernetes-sigs/kind",
ImageTagOptions: []string{
"gitTag",
"projectPath",
"eksDReleaseChannel",
"eksDReleaseNumber",
"kubeVersion",
"projectPath",
},
HasReleaseBranches: true,
},
// Kindnetd artifacts
{
ProjectName: "kindnetd",
ProjectPath: "projects/kubernetes-sigs/kind",
Images: []*assettypes.Image{
{
RepoName: "kindnetd",
},
},
ImageRepoPrefix: "kubernetes-sigs/kind",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
Manifests: []*assettypes.ManifestComponent{
{
Name: "kindnetd",
ManifestFiles: []string{"kindnetd.yaml"},
ReleaseManifestPrefix: "kind",
},
},
},
// Kube-rbac-proxy artifacts
{
ProjectName: "kube-rbac-proxy",
ProjectPath: "projects/brancz/kube-rbac-proxy",
Images: []*assettypes.Image{
{
RepoName: "kube-rbac-proxy",
},
},
ImageRepoPrefix: "brancz",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Kube-vip artifacts
{
ProjectName: "kube-vip",
ProjectPath: "projects/kube-vip/kube-vip",
Images: []*assettypes.Image{
{
RepoName: "kube-vip",
},
},
ImageRepoPrefix: "kube-vip",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Envoy artifacts
{
ProjectName: "envoy",
ProjectPath: "projects/envoyproxy/envoy",
Images: []*assettypes.Image{
{
RepoName: "envoy",
},
},
ImageRepoPrefix: "envoyproxy",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Kustomize-controller artifacts
{
ProjectName: "kustomize-controller",
ProjectPath: "projects/fluxcd/kustomize-controller",
Images: []*assettypes.Image{
{
RepoName: "kustomize-controller",
},
},
ImageRepoPrefix: "fluxcd",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Local-path-provisioner artifacts
{
ProjectName: "local-path-provisioner",
ProjectPath: "projects/rancher/local-path-provisioner",
Images: []*assettypes.Image{
{
RepoName: "local-path-provisioner",
},
},
ImageRepoPrefix: "rancher",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Notification-controller artifacts
{
ProjectName: "notification-controller",
ProjectPath: "projects/fluxcd/notification-controller",
Images: []*assettypes.Image{
{
RepoName: "notification-controller",
},
},
ImageRepoPrefix: "fluxcd",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Rufio artifacts
{
ProjectName: "rufio",
ProjectPath: "projects/tinkerbell/rufio",
Images: []*assettypes.Image{
{
RepoName: "rufio",
},
},
ImageRepoPrefix: "tinkerbell",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Source-controller artifacts
{
ProjectName: "source-controller",
ProjectPath: "projects/fluxcd/source-controller",
Images: []*assettypes.Image{
{
RepoName: "source-controller",
},
},
ImageRepoPrefix: "fluxcd",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Tink artifacts
{
ProjectName: "tink",
ProjectPath: "projects/tinkerbell/tink",
Images: []*assettypes.Image{
{
RepoName: "tink-controller",
},
{
RepoName: "tink-server",
},
{
RepoName: "tink-worker",
},
},
ImageRepoPrefix: "tinkerbell/tink",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
// Tinkerbell chart artifacts
{
ProjectName: "tinkerbell-chart",
ProjectPath: "projects/tinkerbell/tinkerbell-chart",
Images: []*assettypes.Image{
{
RepoName: "tinkerbell-chart",
TrimVersionSignifier: true,
ImageTagConfiguration: assettypes.ImageTagConfiguration{
NonProdSourceImageTagFormat: "<gitTag>",
},
},
},
ImageRepoPrefix: "tinkerbell",
ImageTagOptions: []string{
"gitTag",
"projectPath",
},
},
}
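// GetBundleReleaseAssetsConfigMap returns the asset configs for all projects in the bundle release.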
func GetBundleReleaseAssetsConfigMap() []assettypes.AssetConfig {
return bundleReleaseAssetsConfigMap
}
| 830 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package images
import (
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/release/pkg/assets/tagger"
assettypes "github.com/aws/eks-anywhere/release/pkg/assets/types"
"github.com/aws/eks-anywhere/release/pkg/images"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
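// GetImageAssets constructs the image artifact for a project image, resolving its source and
// release image URIs from the configured tag options, and returns the source repository name
// used for image tag overrides.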
func GetImageAssets(rc *releasetypes.ReleaseConfig, ac *assettypes.AssetConfig, image *assettypes.Image, imageRepoPrefix string, imageTagOptions []string, gitTag, projectPath, gitTagPath, eksDReleaseChannel, eksDReleaseNumber, kubeVersion string) (*releasetypes.ImageArtifact, string, error) {
repoName, assetName := image.RepoName, image.RepoName
if image.AssetName != "" {
assetName = image.AssetName
}
if imageRepoPrefix != "" {
repoName = fmt.Sprintf("%s/%s", imageRepoPrefix, repoName)
}
sourceRepoName, releaseRepoName := repoName, repoName
if image.TrimEksAPrefix {
if rc.ReleaseEnvironment == "production" {
sourceRepoName = strings.TrimPrefix(repoName, "eks-anywhere-")
}
if !rc.DevRelease {
releaseRepoName = strings.TrimPrefix(repoName, "eks-anywhere-")
}
}
imageTagOptionsMap := map[string]string{}
for _, opt := range imageTagOptions {
switch opt {
case "gitTag":
imageTagOptionsMap[opt] = gitTag
case "projectPath":
imageTagOptionsMap[opt] = projectPath
case "eksDReleaseChannel":
imageTagOptionsMap[opt] = eksDReleaseChannel
case "eksDReleaseNumber":
imageTagOptionsMap[opt] = eksDReleaseNumber
case "kubeVersion":
imageTagOptionsMap[opt] = kubeVersion
case "buildRepoSourceRevision":
imageTagOptionsMap[opt] = rc.BuildRepoHead
default:
return nil, "", fmt.Errorf("error configuring image tag options: invalid option: %s", opt)
}
}
sourceImageUri, sourcedFromBranch, err := images.GetSourceImageURI(rc, assetName, sourceRepoName, imageTagOptionsMap, image.ImageTagConfiguration, image.TrimVersionSignifier, ac.HasSeparateTagPerReleaseBranch)
if err != nil {
return nil, "", errors.Cause(err)
}
if sourcedFromBranch != rc.BuildRepoBranchName {
gitTag, err := tagger.GetGitTagAssigner(ac)(rc, gitTagPath, sourcedFromBranch)
if err != nil {
return nil, "", errors.Cause(err)
}
imageTagOptionsMap["gitTag"] = gitTag
}
releaseImageUri, err := images.GetReleaseImageURI(rc, assetName, releaseRepoName, imageTagOptionsMap, image.ImageTagConfiguration, image.TrimVersionSignifier, ac.HasSeparateTagPerReleaseBranch)
if err != nil {
return nil, "", errors.Cause(err)
}
imageArtifact := &releasetypes.ImageArtifact{
AssetName: assetName,
SourceImageURI: sourceImageUri,
ReleaseImageURI: releaseImageUri,
Arch: []string{"amd64", "arm64"},
OS: "linux",
GitTag: gitTag,
ProjectPath: projectPath,
SourcedFromBranch: sourcedFromBranch,
}
return imageArtifact, sourceRepoName, nil
}
| 98 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package images
import (
"reflect"
"strings"
"testing"
"time"
assettypes "github.com/aws/eks-anywhere/release/pkg/assets/types"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
var releaseConfig = &releasetypes.ReleaseConfig{
ArtifactDir: "artifacts",
CliRepoSource: "eks-a-build",
BuildRepoSource: "eks-a-cli",
CliRepoBranchName: "main",
CliRepoUrl: "https://github.com/aws/eks-anywhere.git",
BuildRepoUrl: "https://github.com/aws/eks-anywhere-build-tooling.git",
SourceBucket: "projectbuildpipeline-857-pipelineoutputartifactsb-10ajmk30khe3f",
ReleaseBucket: "release-bucket",
SourceContainerRegistry: "source-container-registry",
ReleaseContainerRegistry: "release-container-registry",
CDN: "https://release-bucket",
BundleNumber: 1,
ReleaseNumber: 1,
ReleaseVersion: "vDev",
ReleaseTime: time.Unix(0, 0),
DevRelease: true,
DryRun: true,
}
func TestGenerateImageAssets(t *testing.T) {
testCases := []struct {
testName string
image *assettypes.Image
imageRepoPrefix string
imageTagOptions []string
assetConfig *assettypes.AssetConfig
buildRepoBranchName string
projectPath string
gitTag string
eksDReleaseChannel string
eksDReleaseNumber string
kubeVersion string
wantImageArtifact *releasetypes.ImageArtifact
wantErr bool
}{
{
testName: "Image artifact for project foo/bar from main",
buildRepoBranchName: "main",
projectPath: "projects/foo/bar",
gitTag: "v0.1.0",
eksDReleaseChannel: "1-21",
eksDReleaseNumber: "8",
kubeVersion: "1.21.9",
assetConfig: &assettypes.AssetConfig{},
image: &assettypes.Image{
RepoName: "bar",
},
imageRepoPrefix: "foo",
imageTagOptions: []string{"gitTag"},
wantImageArtifact: &releasetypes.ImageArtifact{
AssetName: "bar",
SourceImageURI: "source-container-registry/foo/bar:latest",
ReleaseImageURI: "release-container-registry/foo/bar:v0.1.0-eks-a-v0.0.0-dev-build.1",
OS: "linux",
Arch: []string{"amd64", "arm64"},
GitTag: "v0.1.0",
ProjectPath: "projects/foo/bar",
SourcedFromBranch: "main",
},
wantErr: false,
},
{
testName: "Image artifact for project foo/bar from release-branch",
buildRepoBranchName: "release-branch",
projectPath: "projects/foo/bar",
gitTag: "v0.2.0",
eksDReleaseChannel: "1-22",
eksDReleaseNumber: "5",
kubeVersion: "1.22.4",
assetConfig: &assettypes.AssetConfig{},
image: &assettypes.Image{
RepoName: "bar",
},
imageRepoPrefix: "foo",
imageTagOptions: []string{"gitTag"},
wantImageArtifact: &releasetypes.ImageArtifact{
AssetName: "bar",
SourceImageURI: "source-container-registry/foo/bar:release-branch",
ReleaseImageURI: "release-container-registry/foo/bar:v0.2.0-eks-a-v0.0.0-dev-release-branch-build.1",
OS: "linux",
Arch: []string{"amd64", "arm64"},
GitTag: "v0.2.0",
ProjectPath: "projects/foo/bar",
SourcedFromBranch: "release-branch",
},
},
{
testName: "Image artifact for project foo/bar from main with asset name override",
buildRepoBranchName: "main",
projectPath: "projects/foo/bar",
gitTag: "v0.1.0",
eksDReleaseChannel: "1-21",
eksDReleaseNumber: "8",
kubeVersion: "1.21.9",
assetConfig: &assettypes.AssetConfig{},
image: &assettypes.Image{
RepoName: "bar",
AssetName: "lorem-ipsum",
},
imageRepoPrefix: "foo",
imageTagOptions: []string{"gitTag"},
wantImageArtifact: &releasetypes.ImageArtifact{
AssetName: "lorem-ipsum",
SourceImageURI: "source-container-registry/foo/bar:latest",
ReleaseImageURI: "release-container-registry/foo/bar:v0.1.0-eks-a-v0.0.0-dev-build.1",
OS: "linux",
Arch: []string{"amd64", "arm64"},
GitTag: "v0.1.0",
ProjectPath: "projects/foo/bar",
SourcedFromBranch: "main",
},
wantErr: false,
},
{
testName: "Image artifact for project foo/bar from main with custom tagging configurations",
buildRepoBranchName: "main",
projectPath: "projects/foo/bar",
gitTag: "v0.3.0",
eksDReleaseChannel: "1-21",
eksDReleaseNumber: "8",
kubeVersion: "1.21.9",
assetConfig: &assettypes.AssetConfig{},
image: &assettypes.Image{
RepoName: "bar",
AssetName: "custom-bar",
ImageTagConfiguration: assettypes.ImageTagConfiguration{
NonProdSourceImageTagFormat: "<gitTag>-<kubeVersion>-baz-<eksDReleaseChannel>-bar",
ReleaseImageTagFormat: "<eksDReleaseChannel>-<eksDReleaseNumber>-<kubeVersion>-baz-bar",
},
},
imageRepoPrefix: "foo",
imageTagOptions: []string{"gitTag", "eksDReleaseChannel", "eksDReleaseNumber", "kubeVersion"},
wantImageArtifact: &releasetypes.ImageArtifact{
AssetName: "custom-bar",
SourceImageURI: "source-container-registry/foo/bar:v0.3.0-1.21.9-baz-1-21-bar-latest",
ReleaseImageURI: "release-container-registry/foo/bar:1-21-8-1.21.9-baz-bar-eks-a-v0.0.0-dev-build.1",
OS: "linux",
Arch: []string{"amd64", "arm64"},
GitTag: "v0.3.0",
ProjectPath: "projects/foo/bar",
SourcedFromBranch: "main",
},
wantErr: false,
},
{
testName: "Image artifact for project foo/bar from main with incorrect image tag option",
buildRepoBranchName: "main",
projectPath: "projects/foo/bar",
gitTag: "v0.1.0",
eksDReleaseChannel: "1-21",
eksDReleaseNumber: "8",
kubeVersion: "1.21.9",
assetConfig: &assettypes.AssetConfig{},
image: &assettypes.Image{
RepoName: "bar",
},
imageRepoPrefix: "foo",
imageTagOptions: []string{"non-existent-option"},
wantImageArtifact: nil,
wantErr: true,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
releaseConfig.BuildRepoBranchName = tt.buildRepoBranchName
releaseVersion, err := filereader.GetCurrentEksADevReleaseVersion(releaseConfig.ReleaseVersion, releaseConfig, 0)
if err != nil {
t.Fatalf("Error getting previous EKS-A dev release number: %v\n", err)
}
releaseConfig.ReleaseVersion = releaseVersion
releaseConfig.DevReleaseUriVersion = strings.ReplaceAll(releaseVersion, "+", "-")
if gotImageArtifact, _, err := GetImageAssets(releaseConfig, tt.assetConfig, tt.image, tt.imageRepoPrefix, tt.imageTagOptions, tt.gitTag, tt.projectPath, tt.projectPath, tt.eksDReleaseChannel, tt.eksDReleaseNumber, tt.kubeVersion); (err != nil) != tt.wantErr {
t.Fatalf("GetImageAssets got err = %v, want err = %v", err, tt.wantErr)
} else if !reflect.DeepEqual(gotImageArtifact, tt.wantImageArtifact) {
t.Fatalf("GetImageAssets got artifact = %v, expected %v", gotImageArtifact, tt.wantImageArtifact)
}
})
}
}
| 212 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package manifests
import (
"fmt"
"path/filepath"
"github.com/pkg/errors"
assettypes "github.com/aws/eks-anywhere/release/pkg/assets/types"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
artifactutils "github.com/aws/eks-anywhere/release/pkg/util/artifacts"
)
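// GetManifestAssets constructs the manifest artifact for a project manifest file, resolving its
// source and release S3 locations, CDN URI and image tag overrides.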
func GetManifestAssets(rc *releasetypes.ReleaseConfig, manifestComponent *assettypes.ManifestComponent, manifestFile, projectName, projectPath, gitTag, sourcedFromBranch string, imageTagOverrides []releasetypes.ImageTagOverride) (*releasetypes.ManifestArtifact, error) {
componentName := manifestComponent.Name
var sourceS3Prefix string
var releaseS3Path string
latestPath := artifactutils.GetLatestUploadDestination(sourcedFromBranch)
manifestPrefixFolder := projectName
if manifestComponent.ReleaseManifestPrefix != "" {
manifestPrefixFolder = manifestComponent.ReleaseManifestPrefix
}
if rc.DevRelease || rc.ReleaseEnvironment == "development" {
sourceS3Prefix = fmt.Sprintf("%s/%s/manifests/%s", projectPath, latestPath, componentName)
if !manifestComponent.NoVersionSuffix {
sourceS3Prefix = fmt.Sprintf("%s/%s", sourceS3Prefix, gitTag)
}
} else {
sourceS3Prefix = fmt.Sprintf("releases/bundles/%d/artifacts/%s/manifests/%s/%s", rc.BundleNumber, manifestPrefixFolder, componentName, gitTag)
}
if rc.DevRelease {
releaseS3Path = fmt.Sprintf("artifacts/%s/%s/manifests/%s/%s", rc.DevReleaseUriVersion, manifestPrefixFolder, componentName, gitTag)
} else {
releaseS3Path = fmt.Sprintf("releases/bundles/%d/artifacts/%s/manifests/%s/%s", rc.BundleNumber, manifestPrefixFolder, componentName, gitTag)
}
cdnURI, err := artifactutils.GetURI(rc.CDN, filepath.Join(releaseS3Path, manifestFile))
if err != nil {
return nil, errors.Cause(err)
}
manifestArtifact := &releasetypes.ManifestArtifact{
SourceS3Key: manifestFile,
SourceS3Prefix: sourceS3Prefix,
ArtifactPath: filepath.Join(rc.ArtifactDir, fmt.Sprintf("%s-manifests", projectName), rc.BuildRepoHead),
ReleaseName: manifestFile,
ReleaseS3Path: releaseS3Path,
ReleaseCdnURI: cdnURI,
ImageTagOverrides: imageTagOverrides,
GitTag: gitTag,
ProjectPath: projectPath,
SourcedFromBranch: sourcedFromBranch,
Component: componentName,
}
return manifestArtifact, nil
}
| 75 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package manifests
import (
"reflect"
"strings"
"testing"
"time"
assettypes "github.com/aws/eks-anywhere/release/pkg/assets/types"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
var releaseConfig = &releasetypes.ReleaseConfig{
ArtifactDir: "artifacts",
CliRepoSource: "eks-a-build",
BuildRepoSource: "eks-a-cli",
CliRepoBranchName: "main",
CliRepoUrl: "https://github.com/aws/eks-anywhere.git",
BuildRepoUrl: "https://github.com/aws/eks-anywhere-build-tooling.git",
SourceBucket: "projectbuildpipeline-857-pipelineoutputartifactsb-10ajmk30khe3f",
ReleaseBucket: "release-bucket",
SourceContainerRegistry: "source-container-registry",
ReleaseContainerRegistry: "release-container-registry",
CDN: "https://release-bucket",
BundleNumber: 1,
ReleaseNumber: 1,
ReleaseVersion: "vDev",
ReleaseTime: time.Unix(0, 0),
DevRelease: true,
DryRun: true,
}
func TestGenerateManifestAssets(t *testing.T) {
testCases := []struct {
testName string
manifestComponent *assettypes.ManifestComponent
manifestFile string
imageTagOverrides []releasetypes.ImageTagOverride
buildRepoBranchName string
projectName string
projectPath string
gitTag string
wantManifestArtifact *releasetypes.ManifestArtifact
wantErr bool
}{
{
testName: "Manifest artifact for project foo/bar from main",
buildRepoBranchName: "main",
projectName: "bar",
projectPath: "projects/foo/bar",
gitTag: "v0.1.0",
manifestComponent: &assettypes.ManifestComponent{
Name: "bar",
ReleaseManifestPrefix: "bar-manifests",
},
manifestFile: "components.yaml",
imageTagOverrides: []releasetypes.ImageTagOverride{
{
Repository: "foo/bar",
ReleaseUri: "release-container-registry/foo/bar:v0.1.0-eks-a-v0.0.0-dev-build.1",
},
},
wantManifestArtifact: &releasetypes.ManifestArtifact{
SourceS3Prefix: "projects/foo/bar/latest/manifests/bar/v0.1.0",
SourceS3Key: "components.yaml",
ArtifactPath: "artifacts/bar-manifests",
ReleaseName: "components.yaml",
ReleaseS3Path: "artifacts/v0.0.0-dev-build.0/bar-manifests/manifests/bar/v0.1.0",
ReleaseCdnURI: "https://release-bucket/artifacts/v0.0.0-dev-build.0/bar-manifests/manifests/bar/v0.1.0/components.yaml",
ImageTagOverrides: []releasetypes.ImageTagOverride{
{
Repository: "foo/bar",
ReleaseUri: "release-container-registry/foo/bar:v0.1.0-eks-a-v0.0.0-dev-build.1",
},
},
GitTag: "v0.1.0",
ProjectPath: "projects/foo/bar",
SourcedFromBranch: "main",
Component: "bar",
},
wantErr: false,
},
{
testName: "Manifest artifact for project foo/bar from release-branch",
buildRepoBranchName: "release-branch",
projectName: "bar",
projectPath: "projects/foo/bar",
gitTag: "v0.1.0",
manifestComponent: &assettypes.ManifestComponent{
Name: "bar",
ReleaseManifestPrefix: "bar-manifests",
},
manifestFile: "components.yaml",
imageTagOverrides: []releasetypes.ImageTagOverride{
{
Repository: "foo/bar",
ReleaseUri: "release-container-registry/foo/bar:v0.1.0-eks-a-v0.0.0-dev-release-branch-build.1",
},
},
wantManifestArtifact: &releasetypes.ManifestArtifact{
SourceS3Prefix: "projects/foo/bar/release-branch/manifests/bar/v0.1.0",
SourceS3Key: "components.yaml",
ArtifactPath: "artifacts/bar-manifests",
ReleaseName: "components.yaml",
ReleaseS3Path: "artifacts/v0.0.0-dev-release-branch-build.0/bar-manifests/manifests/bar/v0.1.0",
ReleaseCdnURI: "https://release-bucket/artifacts/v0.0.0-dev-release-branch-build.0/bar-manifests/manifests/bar/v0.1.0/components.yaml",
ImageTagOverrides: []releasetypes.ImageTagOverride{
{
Repository: "foo/bar",
ReleaseUri: "release-container-registry/foo/bar:v0.1.0-eks-a-v0.0.0-dev-release-branch-build.1",
},
},
GitTag: "v0.1.0",
ProjectPath: "projects/foo/bar",
SourcedFromBranch: "release-branch",
Component: "bar",
},
wantErr: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
releaseConfig.BuildRepoBranchName = tt.buildRepoBranchName
releaseVersion, err := filereader.GetCurrentEksADevReleaseVersion(releaseConfig.ReleaseVersion, releaseConfig, 0)
if err != nil {
t.Fatalf("Error getting previous EKS-A dev release number: %v\n", err)
}
releaseConfig.ReleaseVersion = releaseVersion
releaseConfig.DevReleaseUriVersion = strings.ReplaceAll(releaseVersion, "+", "-")
if gotManifestArtifact, err := GetManifestAssets(releaseConfig, tt.manifestComponent, tt.manifestFile, tt.projectName, tt.projectPath, tt.gitTag, tt.buildRepoBranchName, tt.imageTagOverrides); (err != nil) != tt.wantErr {
t.Fatalf("GetManifestAssets got err = %v, want err = %v", err, tt.wantErr)
} else if !reflect.DeepEqual(gotManifestArtifact, tt.wantManifestArtifact) {
t.Fatalf("GetManifestAssets got artifact = %v, expected %v", gotManifestArtifact, tt.wantManifestArtifact)
}
})
}
}
| 157 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tagger
import (
"strings"
"github.com/pkg/errors"
assettypes "github.com/aws/eks-anywhere/release/pkg/assets/types"
"github.com/aws/eks-anywhere/release/pkg/filereader"
"github.com/aws/eks-anywhere/release/pkg/git"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
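// BuildToolingGitTagAssigner reads the project's git tag from the build tooling repository,
// using the override branch when one is provided.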
func BuildToolingGitTagAssigner(rc *releasetypes.ReleaseConfig, gitTagPath, overrideBranch string) (string, error) {
branchName := rc.BuildRepoBranchName
if overrideBranch != "" {
branchName = overrideBranch
}
gitTag, err := filereader.ReadGitTag(gitTagPath, rc.BuildRepoSource, branchName)
if err != nil {
return "", errors.Cause(err)
}
return gitTag, nil
}
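// CliGitTagAssigner returns the latest tag of the CLI repository for dev releases, or the
// release version itself for production releases.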
func CliGitTagAssigner(rc *releasetypes.ReleaseConfig, gitTagPath, overrideBranch string) (string, error) {
var gitTag string
if rc.DevRelease {
tagList, err := git.GetRepoTagsDescending(rc.CliRepoSource)
if err != nil {
return "", errors.Cause(err)
}
gitTag = strings.Split(tagList, "\n")[0]
} else {
gitTag = rc.ReleaseVersion
}
return gitTag, nil
}
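// NonExistentTagAssigner returns a placeholder tag for projects that do not have a git tag of their own.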
func NonExistentTagAssigner(rc *releasetypes.ReleaseConfig, gitTagPath, overrideBranch string) (string, error) {
return "non-existent", nil
}
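// GetGitTagAssigner returns the project's custom git tag assigner if one is configured,
// falling back to the build tooling tag assigner otherwise.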
func GetGitTagAssigner(ac *assettypes.AssetConfig) assettypes.GitTagAssigner {
if ac.GitTagAssigner != nil {
return assettypes.GitTagAssigner(ac.GitTagAssigner)
}
return assettypes.GitTagAssigner(BuildToolingGitTagAssigner)
}
| 67 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
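// ManifestComponent describes a component's manifest files and how they are released.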
type ManifestComponent struct {
Name string
ReleaseManifestPrefix string
ManifestFiles []string
NoVersionSuffix bool
}
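// ImageTagConfiguration holds optional overrides for the source and release image tag formats.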
type ImageTagConfiguration struct {
SourceLatestTagFromECR bool
NonProdSourceImageTagFormat string
ProdSourceImageTagFormat string
ReleaseImageTagFormat string
}
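// Image describes a container image built for a project.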
type Image struct {
AssetName string
RepoName string
TrimEksAPrefix bool
ImageTagConfiguration ImageTagConfiguration
TrimVersionSignifier bool
}
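// Archive describes an archive (tarball, OS image or kernel artifact) built for a project.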
type Archive struct {
Name string
Format string
OSName string
ArchitectureOverride string
ArchiveS3PathGetter ArchiveS3PathGenerator
}
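// AssetConfig describes all the assets (images, manifests and archives) released for a project.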
type AssetConfig struct {
ProjectName string
ProjectPath string
GitTagAssigner GitTagAssigner
Archives []*Archive
Images []*Image
ImageRepoPrefix string
ImageTagOptions []string
Manifests []*ManifestComponent
NoGitTag bool
HasReleaseBranches bool
HasSeparateTagPerReleaseBranch bool
OnlyForDevRelease bool
UsesKubeRbacProxy bool
}
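// ArchiveS3PathGenerator computes the source S3 key and prefix along with the release name and
// release S3 path for an archive.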
type ArchiveS3PathGenerator func(rc *releasetypes.ReleaseConfig, archive *Archive, projectPath, gitTag, eksDReleaseChannel, eksDReleaseNumber, kubeVersion, latestPath, arch string) (string, string, string, string, error)
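// GitTagAssigner determines the git tag to use for a project.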
type GitTagAssigner func(rc *releasetypes.ReleaseConfig, gitTagPath, overrideBranch string) (string, error)
| 70 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ecr
import (
"encoding/base64"
"fmt"
"reflect"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ecr"
docker "github.com/fsouza/go-dockerclient"
"github.com/pkg/errors"
artifactutils "github.com/aws/eks-anywhere/release/pkg/util/artifacts"
)
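// GetImageDigest looks up the digest of the given image URI in ECR.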
func GetImageDigest(imageUri, imageContainerRegistry string, ecrClient *ecr.ECR) (string, error) {
repository, tag := artifactutils.SplitImageUri(imageUri, imageContainerRegistry)
imageDetails, err := DescribeImagesPaginated(ecrClient,
&ecr.DescribeImagesInput{
ImageIds: []*ecr.ImageIdentifier{
{
ImageTag: aws.String(tag),
},
},
RepositoryName: aws.String(repository),
},
)
if err != nil {
return "", errors.Cause(err)
}
imageDigest := imageDetails[0].ImageDigest
imageDigestStr := *imageDigest
return imageDigestStr, nil
}
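// GetAuthToken retrieves a base64-encoded authorization token from ECR.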
func GetAuthToken(ecrClient *ecr.ECR) (string, error) {
authTokenOutput, err := ecrClient.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
if err != nil {
return "", errors.Cause(err)
}
authToken := *authTokenOutput.AuthorizationData[0].AuthorizationToken
return authToken, nil
}
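// GetAuthConfig builds a Docker auth configuration from an ECR authorization token.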
func GetAuthConfig(ecrClient *ecr.ECR) (*docker.AuthConfiguration, error) {
// Get ECR authorization token
authToken, err := GetAuthToken(ecrClient)
if err != nil {
return nil, errors.Cause(err)
}
// Decode authorization token to get credential pair
creds, err := base64.StdEncoding.DecodeString(authToken)
if err != nil {
return nil, errors.Cause(err)
}
// Get password from credential pair
credsSplit := strings.Split(string(creds), ":")
password := credsSplit[1]
// Construct docker auth configuration
authConfig := &docker.AuthConfiguration{
Username: "AWS",
Password: password,
}
return authConfig, nil
}
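// DescribeImagesPaginated describes the images in a repository, following pagination tokens
// until all image details have been collected.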
func DescribeImagesPaginated(ecrClient *ecr.ECR, describeInput *ecr.DescribeImagesInput) ([]*ecr.ImageDetail, error) {
var images []*ecr.ImageDetail
describeImagesOutput, err := ecrClient.DescribeImages(describeInput)
if err != nil {
return nil, errors.Cause(err)
}
images = append(images, describeImagesOutput.ImageDetails...)
if describeImagesOutput.NextToken != nil {
nextInput := describeInput
nextInput.NextToken = describeImagesOutput.NextToken
		imageDetails, err := DescribeImagesPaginated(ecrClient, nextInput)
		if err != nil {
			return nil, errors.Cause(err)
		}
		images = append(images, imageDetails...)
}
return images, nil
}
// FilterECRRepoByTagPrefix takes a repository name and a tag prefix as input and finds the latest pushed image whose tags match (or, when hasTag is false, do not match) that prefix.
func FilterECRRepoByTagPrefix(ecrClient *ecr.ECR, repoName, prefix string, hasTag bool) (string, string, error) {
imageDetails, err := DescribeImagesPaginated(ecrClient, &ecr.DescribeImagesInput{
RepositoryName: aws.String(repoName),
})
if len(imageDetails) == 0 {
return "", "", fmt.Errorf("no image details obtained: %v", err)
}
if err != nil {
return "", "", errors.Cause(err)
}
var filteredImageDetails []*ecr.ImageDetail
if hasTag {
filteredImageDetails = imageTagFilter(imageDetails, prefix)
} else {
filteredImageDetails = imageTagFilterWithout(imageDetails, prefix)
}
	// Filter out any tags that don't match our prefix, to handle double-tagged images.
for _, detail := range filteredImageDetails {
for _, tag := range detail.ImageTags {
if tag != nil && !strings.HasPrefix(*tag, prefix) {
detail.ImageTags = removeStringSlice(detail.ImageTags, *tag)
}
}
}
	// In case we don't find any tag prefix matches, we still want to populate the bundle with the latest version.
if len(filteredImageDetails) < 1 {
filteredImageDetails = imageDetails
}
version, sha, err := getLastestOCIShaTag(filteredImageDetails)
if err != nil {
return "", "", err
}
return version, sha, nil
}
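// Illustrative sketch of how this filter might be called; the repository name and prefix
// below are placeholders, not values from the original source:
//
//	// Latest image whose tags start with "v0.1."
//	version, sha, err := FilterECRRepoByTagPrefix(ecrClient, "eks-anywhere-cluster-controller", "v0.1.", true)
//
//	// Latest image whose tags do not carry the "latest" prefix
//	version, sha, err = FilterECRRepoByTagPrefix(ecrClient, "eks-anywhere-cluster-controller", "latest", false)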
// imageTagFilter filters a list of ECR images down to those that have at least one tag with the given prefix.
func imageTagFilter(details []*ecr.ImageDetail, substring string) []*ecr.ImageDetail {
var filteredDetails []*ecr.ImageDetail
for _, detail := range details {
for _, tag := range detail.ImageTags {
if strings.HasPrefix(*tag, substring) {
filteredDetails = append(filteredDetails, detail)
}
}
}
return filteredDetails
}
// imageTagFilterWithout filters a list of ECR images down to those that have at least one tag without the given prefix.
func imageTagFilterWithout(details []*ecr.ImageDetail, substring string) []*ecr.ImageDetail {
var filteredDetails []*ecr.ImageDetail
for _, detail := range details {
for _, tag := range detail.ImageTags {
if !strings.HasPrefix(*tag, substring) {
filteredDetails = append(filteredDetails, detail)
}
}
}
return filteredDetails
}
// getLastestOCIShaTag is used to find the tag/sha of the latest pushed OCI image from a list.
func getLastestOCIShaTag(details []*ecr.ImageDetail) (string, string, error) {
	latest := &ecr.ImageDetail{}
	latest.ImagePushedAt = &time.Time{}
	for _, detail := range details {
		if detail.ImagePushedAt == nil || detail.ImageDigest == nil || len(detail.ImageTags) == 0 {
			continue
		}
		if latest.ImagePushedAt.Before(*detail.ImagePushedAt) {
			latest = detail
		}
	}
	if latest.ImageDigest == nil || len(latest.ImageTags) == 0 {
		return "", "", fmt.Errorf("no images found")
	}
	return *latest.ImageTags[0], *latest.ImageDigest, nil
}
func GetLatestImageSha(ecrClient *ecr.ECR, repoName string) (string, error) {
	imageDetails, err := DescribeImagesPaginated(ecrClient, &ecr.DescribeImagesInput{
		RepositoryName: aws.String(repoName),
	})
	if err != nil {
		return "", errors.Cause(err)
	}
	if len(imageDetails) == 0 {
		return "", fmt.Errorf("no image details obtained for repository %s", repoName)
	}
	latest := &ecr.ImageDetail{}
	latest.ImagePushedAt = &time.Time{}
	for _, detail := range imageDetails {
		if detail.ImagePushedAt == nil || detail.ImageDigest == nil || len(detail.ImageTags) == 0 || detail.ImageManifestMediaType == nil || *detail.ImageManifestMediaType != "application/vnd.oci.image.manifest.v1+json" {
			continue
		}
		if latest.ImagePushedAt.Before(*detail.ImagePushedAt) {
			latest = detail
		}
	}
	// Check whether any matching image was found, and return an error if not.
	if len(latest.ImageTags) == 0 {
		return "", fmt.Errorf("no images found")
	}
	return *latest.ImageTags[0], nil
}
// removeStringSlice removes a named string from a slice, without knowing its index or requiring the slice to be ordered.
func removeStringSlice(l []*string, item string) []*string {
for i, other := range l {
if *other == item {
return append(l[:i], l[i+1:]...)
}
}
return l
}
| 224 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ecrpublic
import (
"encoding/base64"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ecrpublic"
docker "github.com/fsouza/go-dockerclient"
"github.com/pkg/errors"
artifactutils "github.com/aws/eks-anywhere/release/pkg/util/artifacts"
)
func GetImageDigest(imageUri, imageContainerRegistry string, ecrPublicClient *ecrpublic.ECRPublic) (string, error) {
repository, tag := artifactutils.SplitImageUri(imageUri, imageContainerRegistry)
describeImagesOutput, err := ecrPublicClient.DescribeImages(
&ecrpublic.DescribeImagesInput{
ImageIds: []*ecrpublic.ImageIdentifier{
{
ImageTag: aws.String(tag),
},
},
RepositoryName: aws.String(repository),
},
)
	if err != nil {
		return "", errors.Cause(err)
	}
	if len(describeImagesOutput.ImageDetails) == 0 {
		return "", errors.Errorf("no image details found for image URI %s", imageUri)
	}
	imageDigest := describeImagesOutput.ImageDetails[0].ImageDigest
	imageDigestStr := *imageDigest
	return imageDigestStr, nil
}
func GetAuthToken(ecrPublicClient *ecrpublic.ECRPublic) (string, error) {
authTokenOutput, err := ecrPublicClient.GetAuthorizationToken(&ecrpublic.GetAuthorizationTokenInput{})
if err != nil {
return "", errors.Cause(err)
}
authToken := *authTokenOutput.AuthorizationData.AuthorizationToken
return authToken, nil
}
func GetAuthConfig(ecrPublicClient *ecrpublic.ECRPublic) (*docker.AuthConfiguration, error) {
// Get ECR Public authorization token
authToken, err := GetAuthToken(ecrPublicClient)
if err != nil {
return nil, errors.Cause(err)
}
// Decode authorization token to get credential pair
creds, err := base64.StdEncoding.DecodeString(authToken)
if err != nil {
return nil, errors.Cause(err)
}
// Get password from credential pair
credsSplit := strings.Split(string(creds), ":")
password := credsSplit[1]
// Construct docker auth configuration
authConfig := &docker.AuthConfiguration{
Username: "AWS",
Password: password,
}
return authConfig, nil
}
func CheckImageExistence(imageUri, imageContainerRegistry string, ecrPublicClient *ecrpublic.ECRPublic) (bool, error) {
repository, tag := artifactutils.SplitImageUri(imageUri, imageContainerRegistry)
_, err := ecrPublicClient.DescribeImages(
&ecrpublic.DescribeImagesInput{
ImageIds: []*ecrpublic.ImageIdentifier{
{
ImageTag: aws.String(tag),
},
},
RepositoryName: aws.String(repository),
},
)
	if err != nil {
		if strings.Contains(err.Error(), ecrpublic.ErrCodeImageNotFoundException) {
			return false, nil
		}
		return false, errors.Cause(err)
	}
return true, nil
}
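// Illustrative usage sketch, not part of the original source. ECR Public is served out of
// us-east-1, so the client region below reflects that; the image URI and registry are
// placeholders.
//
//	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
//	ecrPublicClient := ecrpublic.New(sess)
//	exists, err := CheckImageExistence("public.ecr.aws/eks-anywhere/cli-tools:v0.1.0", "public.ecr.aws", ecrPublicClient)
//	if err == nil && !exists {
//		fmt.Println("image has not been published yet")
//	}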
| 108 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s3
import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/pkg/errors"
)
func DownloadFile(filePath, bucket, key string) error {
objectURL := fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucket, key)
if err := os.MkdirAll(filepath.Dir(filePath), 0o755); err != nil {
return errors.Cause(err)
}
fd, err := os.Create(filePath)
if err != nil {
return errors.Cause(err)
}
defer fd.Close()
// Get the data
resp, err := http.Get(objectURL)
if err != nil {
return err
}
defer resp.Body.Close()
// Check server response
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("bad status: %s", resp.Status)
}
_, err = io.Copy(fd, resp.Body)
if err != nil {
return err
}
return nil
}
func UploadFile(filePath string, bucket, key *string, s3Uploader *s3manager.Uploader) error {
fd, err := os.Open(filePath)
if err != nil {
return errors.Cause(err)
}
defer fd.Close()
result, err := s3Uploader.Upload(&s3manager.UploadInput{
Bucket: bucket,
Key: key,
Body: fd,
ACL: aws.String("public-read"),
})
if err != nil {
return errors.Cause(err)
}
fmt.Printf("Artifact file uploaded to %s\n", result.Location)
return nil
}
func KeyExists(bucket string, key string) bool {
objectUrl := fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucket, key)
resp, err := http.Head(objectUrl)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}
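// Illustrative sketch tying the helpers above together; the bucket, key, and file paths
// are placeholders, not values from the original source:
//
//	sess := session.Must(session.NewSession())
//	uploader := s3manager.NewUploader(sess)
//	if err := UploadFile("/tmp/artifacts/bundle.yaml", aws.String("my-release-bucket"), aws.String("releases/bundle.yaml"), uploader); err != nil {
//		log.Fatal(err)
//	}
//	if KeyExists("my-release-bucket", "releases/bundle.yaml") {
//		_ = DownloadFile("/tmp/downloads/bundle.yaml", "my-release-bucket", "releases/bundle.yaml")
//	}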
| 93 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
func GetBottlerocketHostContainersBundle(r *releasetypes.ReleaseConfig, eksDReleaseChannel string, imageDigests map[string]string) (anywherev1alpha1.BottlerocketHostContainersBundle, error) {
adminArtifact, err := bottlerocketDefaultArtifact(r, "BOTTLEROCKET_ADMIN_CONTAINER_METADATA", "bottlerocket-admin")
if err != nil {
return anywherev1alpha1.BottlerocketHostContainersBundle{}, errors.Cause(err)
}
controlArtifact, err := bottlerocketDefaultArtifact(r, "BOTTLEROCKET_CONTROL_CONTAINER_METADATA", "bottlerocket-control")
if err != nil {
return anywherev1alpha1.BottlerocketHostContainersBundle{}, errors.Cause(err)
}
bundle := anywherev1alpha1.BottlerocketHostContainersBundle{
Admin: adminArtifact,
Control: controlArtifact,
KubeadmBootstrap: bottlerocketKubeadmBootstrapArtifact(r, eksDReleaseChannel, imageDigests),
}
return bundle, nil
}
func bottlerocketDefaultArtifact(r *releasetypes.ReleaseConfig, metadataFile, imageName string) (anywherev1alpha1.Image, error) {
bottlerocketContainerRegistry := "public.ecr.aws/bottlerocket"
tag, imageDigest, err := filereader.GetBottlerocketContainerMetadata(r, metadataFile)
if err != nil {
return anywherev1alpha1.Image{}, errors.Cause(err)
}
return anywherev1alpha1.Image{
Name: imageName,
Description: fmt.Sprintf("Container image for %s image", imageName),
OS: "linux",
Arch: []string{"amd64"},
URI: fmt.Sprintf("%s/%s:%s", bottlerocketContainerRegistry, imageName, tag),
ImageDigest: imageDigest,
}, nil
}
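// For illustration only (the tag below is a placeholder resolved from the metadata file):
// for the admin container, the artifact built above would look roughly like
//
//	anywherev1alpha1.Image{
//		Name:        "bottlerocket-admin",
//		Description: "Container image for bottlerocket-admin image",
//		OS:          "linux",
//		Arch:        []string{"amd64"},
//		URI:         "public.ecr.aws/bottlerocket/bottlerocket-admin:v1.11.0", // placeholder tag
//		ImageDigest: "<digest from metadata>",
//	}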
func bottlerocketKubeadmBootstrapArtifact(r *releasetypes.ReleaseConfig, eksDReleaseChannel string, imageDigests map[string]string) anywherev1alpha1.Image {
artifacts := r.BundleArtifactsTable[fmt.Sprintf("bottlerocket-bootstrap-%s", eksDReleaseChannel)]
bundleArtifacts := map[string]anywherev1alpha1.Image{}
for _, artifact := range artifacts {
imageArtifact := artifact.Image
bottlerocketBootstrapImage := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleArtifacts[imageArtifact.AssetName] = bottlerocketBootstrapImage
}
return bundleArtifacts["bottlerocket-bootstrap"]
}
| 84 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
sliceutils "github.com/aws/eks-anywhere/release/pkg/util/slices"
)
// NewBundlesName provides a strict format for bundle names, which is validated against in the Cluster webhook.
// The bundle number must increase monotonically for a cluster upgrade to be allowed.
func NewBundlesName(r *releasetypes.ReleaseConfig) string {
return fmt.Sprintf("bundles-%d", r.BundleNumber)
}
func NewBaseBundles(r *releasetypes.ReleaseConfig) *anywherev1alpha1.Bundles {
return &anywherev1alpha1.Bundles{
TypeMeta: metav1.TypeMeta{
APIVersion: anywherev1alpha1.GroupVersion.String(),
Kind: constants.BundlesKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: NewBundlesName(r),
CreationTimestamp: metav1.Time{Time: r.ReleaseTime},
},
Spec: anywherev1alpha1.BundlesSpec{
Number: r.BundleNumber,
},
}
}
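// Sketch of the object shape produced above, assuming a hypothetical ReleaseConfig that
// only sets the fields referenced in this file:
//
//	r := &releasetypes.ReleaseConfig{BundleNumber: 42, ReleaseTime: time.Now()}
//	bundles := NewBaseBundles(r)
//	// bundles.Name == "bundles-42"
//	// bundles.Spec.Number == 42
//	// The versions bundles built by GetVersionsBundles below are attached to this object
//	// by the caller before the manifest is rendered.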
// GetVersionsBundles will build the entire bundle manifest from the
// individual component bundles.
func GetVersionsBundles(r *releasetypes.ReleaseConfig, imageDigests map[string]string) ([]anywherev1alpha1.VersionsBundle, error) {
versionsBundles := []anywherev1alpha1.VersionsBundle{}
certManagerBundle, err := GetCertManagerBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for cert-manager")
}
coreClusterApiBundle, err := GetCoreClusterAPIBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for core cluster-api")
}
kubeadmBootstrapBundle, err := GetKubeadmBootstrapBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for cluster-api kubeadm-bootstrap")
}
kubeadmControlPlaneBundle, err := GetKubeadmControlPlaneBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for cluster-api kubeadm-control-plane")
}
dockerBundle, err := GetDockerBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for Docker infrastructure provider")
}
eksaBundle, err := GetEksaBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for eks-a tools component")
}
ciliumBundle, err := GetCiliumBundle(r)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for Cilium")
}
kindnetdBundle, err := GetKindnetdBundle(r)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for Kindnetd")
}
haproxyBundle, err := GetHaproxyBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for Haproxy")
}
fluxBundle, err := GetFluxBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for Flux controllers")
}
etcdadmBootstrapBundle, err := GetEtcdadmBootstrapBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for external Etcdadm bootstrap")
}
etcdadmControllerBundle, err := GetEtcdadmControllerBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for external Etcdadm controller")
}
packageBundle, err := GetPackagesBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for Package controllers")
}
tinkerbellBundle, err := GetTinkerbellBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for Tinkerbell infrastructure provider")
}
cloudStackBundle, err := GetCloudStackBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for CloudStack infrastructure provider")
}
nutanixBundle, err := GetNutanixBundle(r, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for Nutanix infrastructure provider")
}
eksDReleaseMap, err := filereader.ReadEksDReleases(r)
if err != nil {
return nil, err
}
supportedK8sVersions, err := filereader.GetSupportedK8sVersions(r)
if err != nil {
return nil, errors.Wrapf(err, "Error getting supported Kubernetes versions for bottlerocket")
}
for _, release := range eksDReleaseMap.Releases {
channel := release.Branch
number := strconv.Itoa(release.Number)
dev := release.Dev
kubeVersion := release.KubeVersion
shortKubeVersion := kubeVersion[1:strings.LastIndex(kubeVersion, ".")]
if !sliceutils.SliceContains(supportedK8sVersions, channel) {
continue
}
eksDReleaseBundle, err := GetEksDReleaseBundle(r, channel, kubeVersion, number, imageDigests, dev)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for eks-d %s-%s release bundle", channel, number)
}
vsphereBundle, err := GetVsphereBundle(r, channel, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for vSphere infrastructure provider")
}
bottlerocketHostContainersBundle, err := GetBottlerocketHostContainersBundle(r, channel, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for bottlerocket host containers")
}
snowBundle, err := GetSnowBundle(r, channel, imageDigests)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle for Snow infrastructure provider")
}
versionsBundle := anywherev1alpha1.VersionsBundle{
KubeVersion: shortKubeVersion,
EksD: eksDReleaseBundle,
CertManager: certManagerBundle,
ClusterAPI: coreClusterApiBundle,
Bootstrap: kubeadmBootstrapBundle,
ControlPlane: kubeadmControlPlaneBundle,
VSphere: vsphereBundle,
CloudStack: cloudStackBundle,
Docker: dockerBundle,
Eksa: eksaBundle,
Cilium: ciliumBundle,
Kindnetd: kindnetdBundle,
Flux: fluxBundle,
PackageController: packageBundle,
ExternalEtcdBootstrap: etcdadmBootstrapBundle,
ExternalEtcdController: etcdadmControllerBundle,
BottleRocketHostContainers: bottlerocketHostContainersBundle,
Tinkerbell: tinkerbellBundle,
Haproxy: haproxyBundle,
Snow: snowBundle,
Nutanix: nutanixBundle,
}
versionsBundles = append(versionsBundles, versionsBundle)
}
return versionsBundles, nil
}
| 207 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetCertManagerBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.CertManagerBundle, error) {
artifacts := r.BundleArtifactsTable["cert-manager"]
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for _, artifact := range artifacts {
if artifact.Image != nil {
imageArtifact := artifact.Image
sourceBranch = imageArtifact.SourcedFromBranch
bundleArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleArtifact
artifactHashes = append(artifactHashes, bundleArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.CertManagerBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.CertManagerProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.CertManagerBundle{}, errors.Wrapf(err, "Error getting version for cert-manager")
}
bundle := anywherev1alpha1.CertManagerBundle{
Version: version,
Acmesolver: bundleImageArtifacts["cert-manager-acmesolver"],
Cainjector: bundleImageArtifacts["cert-manager-cainjector"],
Controller: bundleImageArtifacts["cert-manager-controller"],
Ctl: bundleImageArtifacts["cert-manager-ctl"],
Webhook: bundleImageArtifacts["cert-manager-webhook"],
Manifest: bundleManifestArtifacts["cert-manager.yaml"],
}
return bundle, nil
}
| 96 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"path/filepath"
"strings"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
const (
ciliumImageName = "cilium"
ciliumOperatorImageName = "operator-generic"
ciliumHelmChartName = "cilium-chart"
ciliumHelmChart = "cilium"
ciliumImage = "cilium"
ciliumOperatorImage = "operator-generic"
)
func GetCiliumBundle(r *releasetypes.ReleaseConfig) (anywherev1alpha1.CiliumBundle, error) {
artifacts := r.BundleArtifactsTable["cilium"]
ciliumContainerRegistry := "public.ecr.aws/isovalent"
ciliumGitTag, err := filereader.ReadGitTag(constants.CiliumProjectPath, r.BuildRepoSource, r.BuildRepoBranchName)
if err != nil {
return anywherev1alpha1.CiliumBundle{}, errors.Cause(err)
}
ciliumImages := []imageDefinition{
containerImage(ciliumImageName, ciliumImage, ciliumContainerRegistry, ciliumGitTag),
containerImage(ciliumOperatorImageName, ciliumOperatorImage, ciliumContainerRegistry, ciliumGitTag),
		// Helm charts are stored in the same registry and carry the same semantic
		// version as the corresponding container image, but omit the leading "v"
chart(ciliumHelmChartName, ciliumHelmChart, ciliumContainerRegistry, strings.TrimPrefix(ciliumGitTag, "v")),
}
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
for _, imageDef := range ciliumImages {
imageDigest, err := getCiliumImageDigest(r.BuildRepoSource, imageDef.name)
if err != nil {
return anywherev1alpha1.CiliumBundle{}, errors.Cause(err)
}
bundleImageArtifacts[imageDef.name] = imageDef.builder(imageDigest)
}
for _, artifact := range artifacts {
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
}
}
bundle := anywherev1alpha1.CiliumBundle{
Version: ciliumGitTag,
Cilium: bundleImageArtifacts[ciliumImageName],
Operator: bundleImageArtifacts[ciliumOperatorImageName],
Manifest: bundleManifestArtifacts["cilium.yaml"],
HelmChart: bundleImageArtifacts[ciliumHelmChartName],
}
return bundle, nil
}
func getCiliumImageDigest(gitRootPath, imageName string) (string, error) {
projectSource := "projects/cilium/cilium"
imageDigestFileName := fmt.Sprintf("images/%s/IMAGE_DIGEST", imageName)
imageDigestFile := filepath.Join(gitRootPath, projectSource, imageDigestFileName)
imageDigest, err := filereader.ReadFileContentsTrimmed(imageDigestFile)
if err != nil {
return "", errors.Cause(err)
}
return imageDigest, nil
}
type imageDefinition struct {
name, image, registry, tag string
builder imageBuilder
}
type imageBuilder func(digest string) anywherev1alpha1.Image
func containerImage(name, image, registry, tag string) imageDefinition {
return imageDefinition{
name: name,
image: image,
registry: registry,
tag: tag,
builder: func(digest string) anywherev1alpha1.Image {
return anywherev1alpha1.Image{
Name: name,
Description: fmt.Sprintf("Container image for %s image", name),
OS: "linux",
Arch: []string{"amd64"},
URI: fmt.Sprintf("%s/%s:%s", registry, image, tag),
ImageDigest: digest,
}
},
}
}
func chart(name, image, registry, tag string) imageDefinition {
return imageDefinition{
name: name,
image: image,
registry: registry,
tag: tag,
builder: func(digest string) anywherev1alpha1.Image {
return anywherev1alpha1.Image{
Name: name,
Description: fmt.Sprintf("Helm chart for %s", name),
URI: fmt.Sprintf("%s/%s:%s", registry, image, tag),
ImageDigest: digest,
}
},
}
}
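// For illustration (the tag and resulting digests are placeholders): with a ciliumGitTag
// of "v1.11.10", the two builders above expand to entries along the lines of
//
//	containerImage("cilium", "cilium", "public.ecr.aws/isovalent", "v1.11.10")
//	// -> URI "public.ecr.aws/isovalent/cilium:v1.11.10", OS "linux", Arch ["amd64"]
//	chart("cilium-chart", "cilium", "public.ecr.aws/isovalent", "1.11.10")
//	// -> URI "public.ecr.aws/isovalent/cilium:1.11.10", with no OS/Arch fields set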
| 143 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetCloudStackBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.CloudStackBundle, error) {
cloudstackBundleArtifacts := map[string][]releasetypes.Artifact{
"cluster-api-provider-cloudstack": r.BundleArtifactsTable["cluster-api-provider-cloudstack"],
"kube-vip": r.BundleArtifactsTable["kube-vip"],
"kube-rbac-proxy": r.BundleArtifactsTable["kube-rbac-proxy"],
}
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for componentName, artifacts := range cloudstackBundleArtifacts {
for _, artifact := range artifacts {
if artifact.Image != nil {
imageArtifact := artifact.Image
if componentName == "cluster-api-provider-cloudstack" {
sourceBranch = imageArtifact.SourcedFromBranch
}
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.CloudStackBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.CapcProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.CloudStackBundle{}, errors.Wrapf(err, "Error getting version for cluster-api-provider-cloudstack")
}
bundle := anywherev1alpha1.CloudStackBundle{
Version: version,
ClusterAPIController: bundleImageArtifacts["cluster-api-provider-cloudstack"],
KubeVip: bundleImageArtifacts["kube-vip"],
KubeRbacProxy: bundleImageArtifacts["kube-rbac-proxy"],
Components: bundleManifestArtifacts["infrastructure-components.yaml"],
Metadata: bundleManifestArtifacts["metadata.yaml"],
}
return bundle, nil
}
| 101 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetDockerBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.DockerBundle, error) {
dockerBundleArtifacts := map[string][]releasetypes.Artifact{
"cluster-api-provider-docker": r.BundleArtifactsTable["cluster-api-provider-docker"],
"kube-rbac-proxy": r.BundleArtifactsTable["kube-rbac-proxy"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(dockerBundleArtifacts)
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for _, componentName := range sortedComponentNames {
for _, artifact := range dockerBundleArtifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
if componentName == "cluster-api-provider-docker" {
sourceBranch = imageArtifact.SourcedFromBranch
}
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.DockerBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.CapiProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.DockerBundle{}, errors.Wrapf(err, "Error getting version for cluster-api")
}
bundle := anywherev1alpha1.DockerBundle{
Version: version,
Manager: bundleImageArtifacts["cluster-api-provider-docker"],
KubeProxy: bundleImageArtifacts["kube-rbac-proxy"],
Components: bundleManifestArtifacts["infrastructure-components-development.yaml"],
ClusterTemplate: bundleManifestArtifacts["cluster-template-development.yaml"],
Metadata: bundleManifestArtifacts["metadata.yaml"],
}
return bundle, nil
}
| 103 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetCoreClusterAPIBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.CoreClusterAPI, error) {
coreClusterAPIBundleArtifacts := map[string][]releasetypes.Artifact{
"cluster-api": r.BundleArtifactsTable["cluster-api"],
"kube-rbac-proxy": r.BundleArtifactsTable["kube-rbac-proxy"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(coreClusterAPIBundleArtifacts)
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for _, componentName := range sortedComponentNames {
for _, artifact := range coreClusterAPIBundleArtifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
if componentName == "cluster-api" {
if imageArtifact.AssetName != "cluster-api-controller" {
continue
}
sourceBranch = imageArtifact.SourcedFromBranch
}
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
if manifestArtifact.Component != "cluster-api" {
continue
}
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.CoreClusterAPI{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.CapiProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.CoreClusterAPI{}, errors.Wrapf(err, "Error getting version for cluster-api")
}
bundle := anywherev1alpha1.CoreClusterAPI{
Version: version,
Controller: bundleImageArtifacts["cluster-api-controller"],
KubeProxy: bundleImageArtifacts["kube-rbac-proxy"],
Components: bundleManifestArtifacts["core-components.yaml"],
Metadata: bundleManifestArtifacts["metadata.yaml"],
}
return bundle, nil
}
func GetKubeadmBootstrapBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.KubeadmBootstrapBundle, error) {
kubeadmBootstrapBundleArtifacts := map[string][]releasetypes.Artifact{
"cluster-api": r.BundleArtifactsTable["cluster-api"],
"kube-rbac-proxy": r.BundleArtifactsTable["kube-rbac-proxy"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(kubeadmBootstrapBundleArtifacts)
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for _, componentName := range sortedComponentNames {
for _, artifact := range kubeadmBootstrapBundleArtifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
if componentName == "cluster-api" {
if imageArtifact.AssetName != "kubeadm-bootstrap-controller" {
continue
}
sourceBranch = imageArtifact.SourcedFromBranch
}
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
if manifestArtifact.Component != "bootstrap-kubeadm" {
continue
}
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.KubeadmBootstrapBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.CapiProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.KubeadmBootstrapBundle{}, errors.Wrapf(err, "Error getting version for cluster-api")
}
bundle := anywherev1alpha1.KubeadmBootstrapBundle{
Version: version,
Controller: bundleImageArtifacts["kubeadm-bootstrap-controller"],
KubeProxy: bundleImageArtifacts["kube-rbac-proxy"],
Components: bundleManifestArtifacts["bootstrap-components.yaml"],
Metadata: bundleManifestArtifacts["metadata.yaml"],
}
return bundle, nil
}
func GetKubeadmControlPlaneBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.KubeadmControlPlaneBundle, error) {
kubeadmControlPlaneBundleArtifacts := map[string][]releasetypes.Artifact{
"cluster-api": r.BundleArtifactsTable["cluster-api"],
"kube-rbac-proxy": r.BundleArtifactsTable["kube-rbac-proxy"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(kubeadmControlPlaneBundleArtifacts)
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for _, componentName := range sortedComponentNames {
for _, artifact := range kubeadmControlPlaneBundleArtifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
if componentName == "cluster-api" {
if imageArtifact.AssetName != "kubeadm-control-plane-controller" {
continue
}
sourceBranch = imageArtifact.SourcedFromBranch
}
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
if manifestArtifact.Component != "control-plane-kubeadm" {
continue
}
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.KubeadmControlPlaneBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.CapiProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.KubeadmControlPlaneBundle{}, errors.Wrapf(err, "Error getting version for cluster-api")
}
bundle := anywherev1alpha1.KubeadmControlPlaneBundle{
Version: version,
Controller: bundleImageArtifacts["kubeadm-control-plane-controller"],
KubeProxy: bundleImageArtifacts["kube-rbac-proxy"],
Components: bundleManifestArtifacts["control-plane-components.yaml"],
Metadata: bundleManifestArtifacts["metadata.yaml"],
}
return bundle, nil
}
| 270 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetEksaBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.EksaBundle, error) {
eksABundleArtifacts := map[string][]releasetypes.Artifact{
"eks-anywhere-cli-tools": r.BundleArtifactsTable["eks-anywhere-cli-tools"],
"eks-anywhere-cluster-controller": r.BundleArtifactsTable["eks-anywhere-cluster-controller"],
"eks-anywhere-diagnostic-collector": r.BundleArtifactsTable["eks-anywhere-diagnostic-collector"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(eksABundleArtifacts)
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for _, componentName := range sortedComponentNames {
for _, artifact := range eksABundleArtifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.EksaBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(version.NewCliVersioner(r.ReleaseVersion, r.CliRepoSource), componentChecksum)
if err != nil {
return anywherev1alpha1.EksaBundle{}, errors.Wrapf(err, "failed generating version for eksa bundle")
}
bundle := anywherev1alpha1.EksaBundle{
Version: version,
CliTools: bundleImageArtifacts["eks-anywhere-cli-tools"],
Components: bundleManifestArtifacts["eksa-components.yaml"],
ClusterController: bundleImageArtifacts["eks-anywhere-cluster-controller"],
DiagnosticCollector: bundleImageArtifacts["eks-anywhere-diagnostic-collector"],
}
return bundle, nil
}
| 97 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"path/filepath"
"strings"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
artifactutils "github.com/aws/eks-anywhere/release/pkg/util/artifacts"
)
// GetEksACliArtifacts returns the artifacts for the eksctl-anywhere CLI.
func GetEksACliArtifacts(r *releasetypes.ReleaseConfig) ([]releasetypes.Artifact, error) {
osList := []string{"linux", "darwin"}
archList := []string{"amd64", "arm64"}
artifacts := []releasetypes.Artifact{}
for _, os := range osList {
for _, arch := range archList {
releaseName := fmt.Sprintf("eksctl-anywhere-%s-%s-%s.tar.gz", r.ReleaseVersion, os, arch)
releaseName = strings.ReplaceAll(releaseName, "+", "-")
var sourceS3Key string
var sourceS3Prefix string
var releaseS3Path string
sourcedFromBranch := r.CliRepoBranchName
latestPath := artifactutils.GetLatestUploadDestination(sourcedFromBranch)
if r.DevRelease {
sourceS3Key = fmt.Sprintf("eksctl-anywhere-%s-%s.tar.gz", os, arch)
sourceS3Prefix = fmt.Sprintf("eks-a-cli/%s/%s/%s", latestPath, os, arch)
} else if r.ReleaseEnvironment == "development" {
sourceS3Key = fmt.Sprintf("eksctl-anywhere-%s-%s.tar.gz", os, arch)
sourceS3Prefix = fmt.Sprintf("eks-a-cli/staging/%s/%s/%s/", latestPath, os, arch)
} else {
sourceS3Key = fmt.Sprintf("eksctl-anywhere-%s-%s-%s.tar.gz", r.ReleaseVersion, os, arch)
sourceS3Prefix = fmt.Sprintf("releases/eks-a/%d/artifacts/eks-a/%s/%s/%s", r.ReleaseNumber, r.ReleaseVersion, os, arch)
}
if r.DevRelease {
releaseS3Path = fmt.Sprintf("eks-anywhere/%s/eks-a-cli/%s/%s", r.DevReleaseUriVersion, os, arch)
} else {
releaseS3Path = fmt.Sprintf("releases/eks-a/%d/artifacts/eks-a/%s/%s/%s", r.ReleaseNumber, r.ReleaseVersion, os, arch)
}
cdnURI, err := artifactutils.GetURI(r.CDN, filepath.Join(releaseS3Path, releaseName))
if err != nil {
return nil, errors.Cause(err)
}
archiveArtifact := &releasetypes.ArchiveArtifact{
SourceS3Key: sourceS3Key,
SourceS3Prefix: sourceS3Prefix,
ArtifactPath: filepath.Join(r.ArtifactDir, "eks-a", r.CliRepoHead),
ReleaseName: releaseName,
ReleaseS3Path: releaseS3Path,
ReleaseCdnURI: cdnURI,
OS: os,
Arch: []string{arch},
}
artifacts = append(artifacts, releasetypes.Artifact{Archive: archiveArtifact})
}
}
return artifacts, nil
}
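// Illustrative sketch (the release version is a placeholder): for a ReleaseConfig with
// ReleaseVersion "v0.12.0", the loop above yields one archive artifact per OS/arch pair,
// with release names such as
//
//	eksctl-anywhere-v0.12.0-linux-amd64.tar.gz
//	eksctl-anywhere-v0.12.0-linux-arm64.tar.gz
//	eksctl-anywhere-v0.12.0-darwin-amd64.tar.gz
//	eksctl-anywhere-v0.12.0-darwin-arm64.tar.gz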
func GetEksARelease(r *releasetypes.ReleaseConfig) (anywherev1alpha1.EksARelease, error) {
fmt.Println("\n==========================================================")
fmt.Println(" EKS-A Release Spec Generation")
fmt.Println("==========================================================")
artifacts := r.EksAArtifactsTable["eks-a-cli"]
bundleManifestFilePath := artifactutils.GetManifestFilepaths(r.DevRelease, r.Weekly, r.BundleNumber, constants.BundlesKind, r.BuildRepoBranchName, r.ReleaseDate)
bundleManifestUrl, err := artifactutils.GetURI(r.CDN, bundleManifestFilePath)
if err != nil {
return anywherev1alpha1.EksARelease{}, errors.Cause(err)
}
bundleArchiveArtifacts := map[string]anywherev1alpha1.Archive{}
for _, artifact := range artifacts {
archiveArtifact := artifact.Archive
tarfile := filepath.Join(archiveArtifact.ArtifactPath, archiveArtifact.ReleaseName)
sha256, sha512, err := filereader.ReadShaSums(tarfile, r)
if err != nil {
return anywherev1alpha1.EksARelease{}, errors.Cause(err)
}
bundleArchiveArtifact := anywherev1alpha1.Archive{
Name: fmt.Sprintf("eksctl-anywhere-%s-%s", archiveArtifact.OS, archiveArtifact.Arch[0]),
Description: fmt.Sprintf("EKS Anywhere %s %s CLI", strings.Title(archiveArtifact.OS), archiveArtifact.Arch[0]),
OS: archiveArtifact.OS,
Arch: archiveArtifact.Arch,
URI: archiveArtifact.ReleaseCdnURI,
SHA256: sha256,
SHA512: sha512,
}
bundleArchiveArtifacts[fmt.Sprintf("eksctl-anywhere-%s-%s", archiveArtifact.OS, archiveArtifact.Arch[0])] = bundleArchiveArtifact
}
eksARelease := anywherev1alpha1.EksARelease{
Date: r.ReleaseTime.String(),
Version: r.ReleaseVersion,
Number: r.ReleaseNumber,
GitCommit: r.CliRepoHead,
GitTag: r.ReleaseVersion,
EksABinary: anywherev1alpha1.BinaryBundle{
LinuxBinary: bundleArchiveArtifacts["eksctl-anywhere-linux-amd64"],
DarwinBinary: bundleArchiveArtifacts["eksctl-anywhere-darwin-amd64"],
},
EksACLI: anywherev1alpha1.PlatformBundle{
LinuxBinary: anywherev1alpha1.ArchitectureBundle{
Amd64: bundleArchiveArtifacts["eksctl-anywhere-linux-amd64"],
Arm64: bundleArchiveArtifacts["eksctl-anywhere-linux-arm64"],
},
DarwinBinary: anywherev1alpha1.ArchitectureBundle{
Amd64: bundleArchiveArtifacts["eksctl-anywhere-darwin-amd64"],
Arm64: bundleArchiveArtifacts["eksctl-anywhere-darwin-arm64"],
},
},
BundleManifestUrl: bundleManifestUrl,
}
fmt.Printf("%s Successfully generated EKS-A release spec\n", constants.SuccessIcon)
return eksARelease, nil
}
| 149 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"path/filepath"
"strings"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
func GetEksDReleaseBundle(r *releasetypes.ReleaseConfig, eksDReleaseChannel, kubeVer, eksDReleaseNumber string, imageDigests map[string]string, dev bool) (anywherev1alpha1.EksDRelease, error) {
artifacts := r.BundleArtifactsTable[fmt.Sprintf("image-builder-%s", eksDReleaseChannel)]
artifacts = append(artifacts, r.BundleArtifactsTable[fmt.Sprintf("kind-%s", eksDReleaseChannel)]...)
tarballArtifacts := map[string][]releasetypes.Artifact{
"containerd": r.BundleArtifactsTable["containerd"],
"cri-tools": r.BundleArtifactsTable["cri-tools"],
"etcdadm": r.BundleArtifactsTable["etcdadm"],
"image-builder": r.BundleArtifactsTable["image-builder"],
}
bundleArchiveArtifacts := map[string]anywherev1alpha1.Archive{}
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
eksDManifestUrl := filereader.GetEksDReleaseManifestUrl(eksDReleaseChannel, eksDReleaseNumber, dev)
for _, artifact := range artifacts {
if artifact.Archive != nil {
archiveArtifact := artifact.Archive
osName := archiveArtifact.OSName
imageFormat := archiveArtifact.ImageFormat
tarfile := filepath.Join(archiveArtifact.ArtifactPath, archiveArtifact.ReleaseName)
sha256, sha512, err := filereader.ReadShaSums(tarfile, r)
if err != nil {
return anywherev1alpha1.EksDRelease{}, errors.Cause(err)
}
bundleArchiveArtifact := anywherev1alpha1.Archive{
Name: archiveArtifact.ReleaseName,
Description: fmt.Sprintf("%s %s image for EKS-D %s-%s release", strings.Title(archiveArtifact.OSName), strings.Title(archiveArtifact.ImageFormat), eksDReleaseChannel, eksDReleaseNumber),
OS: archiveArtifact.OS,
OSName: archiveArtifact.OSName,
Arch: archiveArtifact.Arch,
URI: archiveArtifact.ReleaseCdnURI,
SHA256: sha256,
SHA512: sha512,
}
bundleArchiveArtifacts[fmt.Sprintf("%s-%s", osName, imageFormat)] = bundleArchiveArtifact
}
if artifact.Image != nil {
imageArtifact := artifact.Image
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts["kind-node"] = bundleImageArtifact
}
}
for componentName, artifacts := range tarballArtifacts {
for _, artifact := range artifacts {
if artifact.Archive != nil {
archiveArtifact := artifact.Archive
tarfile := filepath.Join(archiveArtifact.ArtifactPath, archiveArtifact.ReleaseName)
sha256, sha512, err := filereader.ReadShaSums(tarfile, r)
if err != nil {
return anywherev1alpha1.EksDRelease{}, errors.Cause(err)
}
bundleArchiveArtifact := anywherev1alpha1.Archive{
Name: archiveArtifact.ReleaseName,
Description: fmt.Sprintf("%s tarball for %s/%s", componentName, archiveArtifact.OS, archiveArtifact.Arch[0]),
OS: archiveArtifact.OS,
Arch: archiveArtifact.Arch,
URI: archiveArtifact.ReleaseCdnURI,
SHA256: sha256,
SHA512: sha512,
}
bundleArchiveArtifacts[componentName] = bundleArchiveArtifact
}
}
}
eksdRelease, err := filereader.GetEksdRelease(eksDManifestUrl)
if err != nil {
return anywherev1alpha1.EksDRelease{}, err
}
gitCommit := r.BuildRepoHead
if r.DryRun {
gitCommit = constants.FakeGitCommit
}
bundle := anywherev1alpha1.EksDRelease{
Name: eksdRelease.Name,
ReleaseChannel: eksDReleaseChannel,
KubeVersion: kubeVer,
EksDReleaseUrl: eksDManifestUrl,
GitCommit: gitCommit,
KindNode: bundleImageArtifacts["kind-node"],
Etcdadm: bundleArchiveArtifacts["etcdadm"],
Crictl: bundleArchiveArtifacts["cri-tools"],
Containerd: bundleArchiveArtifacts["containerd"],
ImageBuilder: bundleArchiveArtifacts["image-builder"],
Ami: anywherev1alpha1.OSImageBundle{
Bottlerocket: bundleArchiveArtifacts["bottlerocket-ami"],
},
Ova: anywherev1alpha1.OSImageBundle{
Bottlerocket: bundleArchiveArtifacts["bottlerocket-ova"],
},
Raw: anywherev1alpha1.OSImageBundle{
Bottlerocket: bundleArchiveArtifacts["bottlerocket-raw"],
},
Components: constants.EksDReleaseComponentsUrl,
}
return bundle, nil
}
| 147 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetEtcdadmBootstrapBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.EtcdadmBootstrapBundle, error) {
etcdadmBootstrapBundleArtifacts := map[string][]releasetypes.Artifact{
"etcdadm-bootstrap-provider": r.BundleArtifactsTable["etcdadm-bootstrap-provider"],
"kube-rbac-proxy": r.BundleArtifactsTable["kube-rbac-proxy"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(etcdadmBootstrapBundleArtifacts)
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for _, componentName := range sortedComponentNames {
for _, artifact := range etcdadmBootstrapBundleArtifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
if componentName == "etcdadm-bootstrap-provider" {
sourceBranch = imageArtifact.SourcedFromBranch
}
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.EtcdadmBootstrapBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.EtcdadmBootstrapProviderProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.EtcdadmBootstrapBundle{}, errors.Wrapf(err, "Error getting version for etcdadm-bootstrap-provider")
}
bundle := anywherev1alpha1.EtcdadmBootstrapBundle{
Version: version,
Controller: bundleImageArtifacts["etcdadm-bootstrap-provider"],
KubeProxy: bundleImageArtifacts["kube-rbac-proxy"],
Components: bundleManifestArtifacts["bootstrap-components.yaml"],
Metadata: bundleManifestArtifacts["metadata.yaml"],
}
return bundle, nil
}
| 103 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetEtcdadmControllerBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.EtcdadmControllerBundle, error) {
etcdadmControllerBundleArtifacts := map[string][]releasetypes.Artifact{
"etcdadm-controller": r.BundleArtifactsTable["etcdadm-controller"],
"kube-rbac-proxy": r.BundleArtifactsTable["kube-rbac-proxy"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(etcdadmControllerBundleArtifacts)
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for _, componentName := range sortedComponentNames {
for _, artifact := range etcdadmControllerBundleArtifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
if componentName == "etcdadm-controller" {
sourceBranch = imageArtifact.SourcedFromBranch
}
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.EtcdadmControllerBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.EtcdadmControllerProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.EtcdadmControllerBundle{}, errors.Wrapf(err, "Error getting version for etcdadm-controller")
}
bundle := anywherev1alpha1.EtcdadmControllerBundle{
Version: version,
Controller: bundleImageArtifacts["etcdadm-controller"],
KubeProxy: bundleImageArtifacts["kube-rbac-proxy"],
Components: bundleManifestArtifacts["bootstrap-components.yaml"],
Metadata: bundleManifestArtifacts["metadata.yaml"],
}
return bundle, nil
}
| 103 |
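// Illustrative sketch, not taken from the eks-anywhere release code: the bundle
// builders above fold every image digest and manifest hash into a single short
// component checksum (version.GenerateComponentHash) that is appended to the
// component version. A minimal stand-alone approximation, assuming a plain
// SHA-256 over the ordered hashes (the real implementation may differ):
package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// componentChecksum hashes the ordered artifact hashes and keeps a short
// prefix, mirroring the 7-character FakeComponentChecksum used for dry runs.
func componentChecksum(artifactHashes []string) string {
	sum := sha256.Sum256([]byte(strings.Join(artifactHashes, "")))
	return fmt.Sprintf("%x", sum)[:7]
}

func main() {
	hashes := []string{
		"sha256:0123456789abcdef", // example image digest
		"f00dfeed",                // example manifest hash
	}
	fmt.Println(componentChecksum(hashes))
}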
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetFluxBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.FluxBundle, error) {
fluxBundleArtifacts := map[string][]releasetypes.Artifact{
"helm-controller": r.BundleArtifactsTable["helm-controller"],
"kustomize-controller": r.BundleArtifactsTable["kustomize-controller"],
"notification-controller": r.BundleArtifactsTable["notification-controller"],
"source-controller": r.BundleArtifactsTable["source-controller"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(fluxBundleArtifacts)
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
artifactHashes := []string{}
for _, componentName := range sortedComponentNames {
for _, artifact := range fluxBundleArtifacts[componentName] {
imageArtifact := artifact.Image
sourceBranch = imageArtifact.SourcedFromBranch
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewMultiProjectVersionerWithGITTAG(r.BuildRepoSource,
constants.FluxcdRootPath,
constants.Flux2ProjectPath,
sourceBranch,
r,
),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.FluxBundle{}, errors.Wrap(err, "failed generating version for flux bundle")
}
bundle := anywherev1alpha1.FluxBundle{
Version: version,
SourceController: bundleImageArtifacts["source-controller"],
KustomizeController: bundleImageArtifacts["kustomize-controller"],
HelmController: bundleImageArtifacts["helm-controller"],
NotificationController: bundleImageArtifacts["notification-controller"],
}
return bundle, nil
}
| 90 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
func GetHaproxyBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.HaproxyBundle, error) {
artifacts := r.BundleArtifactsTable["haproxy"]
bundleArtifacts := map[string]anywherev1alpha1.Image{}
for _, artifact := range artifacts {
imageArtifact := artifact.Image
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleArtifacts[imageArtifact.AssetName] = bundleImageArtifact
}
bundle := anywherev1alpha1.HaproxyBundle{
Image: bundleArtifacts["haproxy"],
}
return bundle, nil
}
| 48 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetKindnetdBundle(r *releasetypes.ReleaseConfig) (anywherev1alpha1.KindnetdBundle, error) {
artifacts := r.BundleArtifactsTable["kindnetd"]
var sourceBranch string
var componentChecksum string
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for _, artifact := range artifacts {
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
sourceBranch = manifestArtifact.SourcedFromBranch
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.KindnetdBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.KindProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.KindnetdBundle{}, errors.Wrapf(err, "Error getting version for kind")
}
bundle := anywherev1alpha1.KindnetdBundle{
Version: version,
Manifest: bundleManifestArtifacts["kindnetd.yaml"],
}
return bundle, nil
}
| 74 |
eks-anywhere | aws | Go | package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
"github.com/aws/eks-anywhere/release/pkg/version"
)
// GetNutanixBundle returns the bundle for Nutanix.
func GetNutanixBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.NutanixBundle, error) {
nutanixBundleArtifacts := map[string][]releasetypes.Artifact{
"cluster-api-provider-nutanix": r.BundleArtifactsTable["cluster-api-provider-nutanix"],
"kube-rbac-proxy": r.BundleArtifactsTable["kube-rbac-proxy"],
"kube-vip": r.BundleArtifactsTable["kube-vip"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(nutanixBundleArtifacts)
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := make([]string, 0)
for _, componentName := range sortedComponentNames {
for _, artifact := range nutanixBundleArtifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
if componentName == "cluster-api-provider-nutanix" {
sourceBranch = imageArtifact.SourcedFromBranch
}
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.NutanixBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
capxVersion, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.CapxProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.NutanixBundle{}, errors.Wrapf(err, "Error getting version for cluster-api-provider-nutanix")
}
bundle := anywherev1alpha1.NutanixBundle{
Version: capxVersion,
ClusterAPIController: bundleImageArtifacts["cluster-api-provider-nutanix"],
KubeVip: bundleImageArtifacts["kube-vip"],
Components: bundleManifestArtifacts["infrastructure-components.yaml"],
ClusterTemplate: bundleManifestArtifacts["cluster-template.yaml"],
Metadata: bundleManifestArtifacts["metadata.yaml"],
}
return bundle, nil
}
| 89 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"strings"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/aws/ecr"
"github.com/aws/eks-anywhere/release/pkg/aws/ecrpublic"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/helm"
"github.com/aws/eks-anywhere/release/pkg/images"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetPackagesBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.PackageBundle, error) {
artifacts := map[string][]releasetypes.Artifact{
"eks-anywhere-packages": r.BundleArtifactsTable["eks-anywhere-packages"],
"ecr-token-refresher": r.BundleArtifactsTable["ecr-token-refresher"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(artifacts)
var sourceBranch string
var componentChecksum string
var Helmtag, Imagetag, Tokentag string
var Helmsha, Imagesha, TokenSha string
var err error
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
artifactHashes := []string{}
	// Find the latest packages dev build for the Helm chart and image, whose tags always start with `0.0.0`/`v0.0.0` and are built off the packages GitHub repo's main branch on every commit.
	// If we can't find a build starting with our tag prefix, we default to the original dev tag.
	// If we do find the tag in private ECR but it doesn't exist in public ECR, copy the image over so the Helm chart resolves correctly.
if r.DevRelease && !r.DryRun {
Helmtag, Helmsha, err = ecr.FilterECRRepoByTagPrefix(r.SourceClients.ECR.EcrClient, "eks-anywhere-packages", "0.0.0", true)
if err != nil {
fmt.Printf("Error getting dev version helm tag EKS Anywhere package controller, using latest version %v", err)
}
Imagetag, Imagesha, err = ecr.FilterECRRepoByTagPrefix(r.SourceClients.ECR.EcrClient, "eks-anywhere-packages", "v0.0.0", true)
if err != nil {
fmt.Printf("Error getting dev version Image tag EKS Anywhere package controller, using latest version %v", err)
}
PackageImage, err := ecrpublic.CheckImageExistence(fmt.Sprintf("%s/%s:%s", r.ReleaseContainerRegistry, "eks-anywhere-packages", Imagetag), r.ReleaseContainerRegistry, r.ReleaseClients.ECRPublic.Client)
if err != nil {
fmt.Printf("Error checking image version existance for EKS Anywhere package controller, using latest version: %v", err)
}
if !PackageImage {
fmt.Printf("Did not find the required helm image in Public ECR... copying image: %v\n", fmt.Sprintf("%s/%s:%s", r.ReleaseContainerRegistry, "eks-anywhere-packages", Imagetag))
err := images.CopyToDestination(r.SourceClients.ECR.AuthConfig, r.ReleaseClients.ECRPublic.AuthConfig, fmt.Sprintf("%s/%s:%s", r.SourceContainerRegistry, "eks-anywhere-packages", Imagetag), fmt.Sprintf("%s/%s:%s", r.ReleaseContainerRegistry, "eks-anywhere-packages", Imagetag))
if err != nil {
fmt.Printf("Error copying dev EKS Anywhere package controller image, to ECR Public: %v", err)
}
}
Tokentag, TokenSha, err = ecr.FilterECRRepoByTagPrefix(r.SourceClients.ECR.EcrClient, "ecr-token-refresher", "v0.0.0", true)
if err != nil {
fmt.Printf("Error getting dev version Image tag EKS Anywhere package token refresher, using latest version %v", err)
}
TokenImage, err := ecrpublic.CheckImageExistence(fmt.Sprintf("%s/%s:%s", r.ReleaseContainerRegistry, "ecr-token-refresher", Tokentag), r.ReleaseContainerRegistry, r.ReleaseClients.ECRPublic.Client)
if err != nil {
fmt.Printf("Error checking image version existance for EKS Anywhere package token refresher, using latest version: %v", err)
}
if !TokenImage {
fmt.Printf("Did not find the required helm image in Public ECR... copying image: %v\n", fmt.Sprintf("%s/%s:%s", r.ReleaseContainerRegistry, "ecr-token-refresher", Tokentag))
err := images.CopyToDestination(r.SourceClients.ECR.AuthConfig, r.ReleaseClients.ECRPublic.AuthConfig, fmt.Sprintf("%s/%s:%s", r.SourceContainerRegistry, "ecr-token-refresher", Tokentag), fmt.Sprintf("%s/%s:%s", r.ReleaseContainerRegistry, "ecr-token-refresher", Tokentag))
if err != nil {
fmt.Printf("Error copying dev EKS Anywhere package token refresher image, to ECR Public: %v", err)
}
}
}
for _, componentName := range sortedComponentNames {
for _, artifact := range artifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
sourceBranch = imageArtifact.SourcedFromBranch
bundleImageArtifact := anywherev1alpha1.Image{}
if strings.HasSuffix(imageArtifact.AssetName, "helm") {
Digest := imageDigests[imageArtifact.ReleaseImageURI]
if r.DevRelease && Helmsha != "" && Helmtag != "" {
Digest = Helmsha
imageArtifact.ReleaseImageURI = replaceTag(imageArtifact.ReleaseImageURI, Helmtag)
}
assetName := strings.TrimSuffix(imageArtifact.AssetName, "-helm")
bundleImageArtifact = anywherev1alpha1.Image{
Name: assetName,
Description: fmt.Sprintf("Helm chart for %s", assetName),
URI: imageArtifact.ReleaseImageURI,
ImageDigest: Digest,
}
} else {
Digest := imageDigests[imageArtifact.ReleaseImageURI]
if strings.HasSuffix(imageArtifact.AssetName, "eks-anywhere-packages") && r.DevRelease && TokenSha != "" && Tokentag != "" {
Digest = Imagesha
imageArtifact.ReleaseImageURI = replaceTag(imageArtifact.ReleaseImageURI, Imagetag)
} else if strings.HasSuffix(imageArtifact.AssetName, "ecr-token-refresher") && r.DevRelease && Imagesha != "" && Imagetag != "" {
Digest = TokenSha
imageArtifact.ReleaseImageURI = replaceTag(imageArtifact.ReleaseImageURI, Tokentag)
}
bundleImageArtifact = anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: Digest,
}
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
}
}
if !r.DryRun && r.DevRelease && r.BuildRepoBranchName == "main" {
for _, componentName := range sortedComponentNames {
for _, artifact := range artifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
sourceBranch = imageArtifact.SourcedFromBranch
if strings.HasSuffix(imageArtifact.AssetName, "helm") {
trimmedAsset := strings.TrimSuffix(artifact.Image.AssetName, "-helm")
fmt.Printf("trimmedAsset=%v\n\n", trimmedAsset)
helmDriver, err := helm.NewHelm()
if err != nil {
return anywherev1alpha1.PackageBundle{}, errors.Wrap(err, "creating helm client")
}
fmt.Printf("Modifying helm chart for %s\n", trimmedAsset)
helmDest, err := helm.GetHelmDest(helmDriver, r, imageArtifact.ReleaseImageURI, trimmedAsset)
if err != nil {
return anywherev1alpha1.PackageBundle{}, errors.Wrap(err, "getting Helm destination:")
}
fmt.Printf("helmDest=%v\n", helmDest)
fmt.Printf("Pulled helm chart locally to %s\n", helmDest)
fmt.Printf("r.sourceClients")
err = helm.ModifyAndPushChartYaml(*imageArtifact, r, helmDriver, helmDest, artifacts, bundleImageArtifacts)
if err != nil {
return anywherev1alpha1.PackageBundle{}, errors.Wrap(err, "modifying Chart.yaml and pushing Helm chart to destination:")
}
}
}
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.PackagesProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.PackageBundle{}, errors.Wrap(err, "Error getting version for EKS Anywhere package controller")
}
bundle := anywherev1alpha1.PackageBundle{
Version: version,
Controller: bundleImageArtifacts["eks-anywhere-packages"],
TokenRefresher: bundleImageArtifacts["ecr-token-refresher"],
HelmChart: bundleImageArtifacts["eks-anywhere-packages-helm"],
}
return bundle, nil
}
// replaceTag replaces the tag portion of an image URI (everything after the last colon) with the provided tag.
func replaceTag(uri, tag string) string {
NewURIList := strings.Split(uri, ":")
if len(NewURIList) < 2 {
return uri
}
NewURIList[len(NewURIList)-1] = tag
uri = strings.Join(NewURIList[:], ":")
return uri
}
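
// Illustrative example (the helper name is hypothetical and unused by the
// release tooling): replaceTag swaps only the text after the last colon, so a
// URI that already carries a tag keeps its registry and repository unchanged.
func exampleReplaceTag() {
	uri := "public.ecr.aws/eks-anywhere/eks-anywhere-packages:v0.3.9"
	// Prints: public.ecr.aws/eks-anywhere/eks-anywhere-packages:v0.0.0-dev-build.123
	fmt.Println(replaceTag(uri, "v0.0.0-dev-build.123"))
}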
| 194 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetSnowBundle(r *releasetypes.ReleaseConfig, eksDReleaseChannel string, imageDigests map[string]string) (anywherev1alpha1.SnowBundle, error) {
capasBundleArtifacts := map[string][]releasetypes.Artifact{
"cluster-api-provider-aws-snow": r.BundleArtifactsTable["cluster-api-provider-aws-snow"],
"kube-rbac-proxy": r.BundleArtifactsTable["kube-rbac-proxy"],
"kube-vip": r.BundleArtifactsTable["kube-vip"],
"bottlerocket-bootstrap-snow": r.BundleArtifactsTable[fmt.Sprintf("bottlerocket-bootstrap-%s", eksDReleaseChannel)],
}
sortedComponentNames := bundleutils.SortArtifactsMap(capasBundleArtifacts)
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for _, componentName := range sortedComponentNames {
for _, artifact := range capasBundleArtifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
if componentName == "cluster-api-provider-aws-snow" {
sourceBranch = manifestArtifact.SourcedFromBranch
}
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.SnowBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.CapasProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.SnowBundle{}, errors.Wrapf(err, "Error getting version for CAPAS")
}
bundle := anywherev1alpha1.SnowBundle{
Version: version,
Manager: bundleImageArtifacts["cluster-api-snow-controller"],
KubeVip: bundleImageArtifacts["kube-vip"],
BottlerocketBootstrapSnow: bundleImageArtifacts["bottlerocket-bootstrap-snow"],
Components: bundleManifestArtifacts["infrastructure-components.yaml"],
Metadata: bundleManifestArtifacts["metadata.yaml"],
}
return bundle, nil
}
| 106 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"strings"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetTinkerbellBundle(r *releasetypes.ReleaseConfig, imageDigests map[string]string) (anywherev1alpha1.TinkerbellBundle, error) {
tinkerbellBundleArtifacts := map[string][]releasetypes.Artifact{
"cluster-api-provider-tinkerbell": r.BundleArtifactsTable["cluster-api-provider-tinkerbell"],
"kube-vip": r.BundleArtifactsTable["kube-vip"],
"envoy": r.BundleArtifactsTable["envoy"],
"tink": r.BundleArtifactsTable["tink"],
"hegel": r.BundleArtifactsTable["hegel"],
"boots": r.BundleArtifactsTable["boots"],
"hub": r.BundleArtifactsTable["hub"],
"hook": r.BundleArtifactsTable["hook"],
"rufio": r.BundleArtifactsTable["rufio"],
"tinkerbell-chart": r.BundleArtifactsTable["tinkerbell-chart"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(tinkerbellBundleArtifacts)
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
bundleArchiveArtifacts := map[string]anywherev1alpha1.Archive{}
artifactHashes := []string{}
for _, componentName := range sortedComponentNames {
for _, artifact := range tinkerbellBundleArtifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
bundleImageArtifact := anywherev1alpha1.Image{}
if componentName == "cluster-api-provider-tinkerbell" {
sourceBranch = imageArtifact.SourcedFromBranch
}
if strings.HasSuffix(imageArtifact.AssetName, "chart") {
bundleImageArtifact = anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Helm chart for %s", imageArtifact.AssetName),
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
} else {
bundleImageArtifact = anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.TinkerbellBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
if artifact.Archive != nil {
archiveArtifact := artifact.Archive
bundleArchiveArtifact := anywherev1alpha1.Archive{
Name: archiveArtifact.ReleaseName,
Description: "Tinkerbell operating system installation environment (osie) component",
URI: archiveArtifact.ReleaseCdnURI,
}
bundleArchiveArtifacts[archiveArtifact.ReleaseName] = bundleArchiveArtifact
}
}
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.CaptProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.TinkerbellBundle{}, errors.Wrapf(err, "Error getting version for cluster-api-provider-tinkerbell")
}
bundle := anywherev1alpha1.TinkerbellBundle{
Version: version,
ClusterAPIController: bundleImageArtifacts["cluster-api-provider-tinkerbell"],
KubeVip: bundleImageArtifacts["kube-vip"],
Envoy: bundleImageArtifacts["envoy"],
Components: bundleManifestArtifacts["infrastructure-components.yaml"],
Metadata: bundleManifestArtifacts["metadata.yaml"],
ClusterTemplate: bundleManifestArtifacts["cluster-template.yaml"],
TinkerbellStack: anywherev1alpha1.TinkerbellStackBundle{
Actions: anywherev1alpha1.ActionsBundle{
Cexec: bundleImageArtifacts["cexec"],
Kexec: bundleImageArtifacts["kexec"],
ImageToDisk: bundleImageArtifacts["image2disk"],
OciToDisk: bundleImageArtifacts["oci2disk"],
Reboot: bundleImageArtifacts["reboot"],
WriteFile: bundleImageArtifacts["writefile"],
},
Boots: bundleImageArtifacts["boots"],
Hegel: bundleImageArtifacts["hegel"],
Hook: anywherev1alpha1.HookBundle{
Bootkit: bundleImageArtifacts["hook-bootkit"],
Docker: bundleImageArtifacts["hook-docker"],
Kernel: bundleImageArtifacts["hook-kernel"],
Initramfs: anywherev1alpha1.HookArch{
Arm: bundleArchiveArtifacts["initramfs-aarch64"],
Amd: bundleArchiveArtifacts["initramfs-x86_64"],
},
Vmlinuz: anywherev1alpha1.HookArch{
Arm: bundleArchiveArtifacts["vmlinuz-aarch64"],
Amd: bundleArchiveArtifacts["vmlinuz-x86_64"],
},
},
Rufio: bundleImageArtifacts["rufio"],
Tink: anywherev1alpha1.TinkBundle{
TinkController: bundleImageArtifacts["tink-controller"],
TinkServer: bundleImageArtifacts["tink-server"],
TinkWorker: bundleImageArtifacts["tink-worker"],
},
TinkebellChart: bundleImageArtifacts["tinkerbell-chart"],
},
}
return bundle, nil
}
| 166 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundles
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
bundleutils "github.com/aws/eks-anywhere/release/pkg/util/bundles"
"github.com/aws/eks-anywhere/release/pkg/version"
)
func GetVsphereBundle(r *releasetypes.ReleaseConfig, eksDReleaseChannel string, imageDigests map[string]string) (anywherev1alpha1.VSphereBundle, error) {
vsphereBundleArtifacts := map[string][]releasetypes.Artifact{
"cluster-api-provider-vsphere": r.BundleArtifactsTable["cluster-api-provider-vsphere"],
"kube-rbac-proxy": r.BundleArtifactsTable["kube-rbac-proxy"],
"kube-vip": r.BundleArtifactsTable["kube-vip"],
}
sortedComponentNames := bundleutils.SortArtifactsMap(vsphereBundleArtifacts)
var sourceBranch string
var componentChecksum string
bundleImageArtifacts := map[string]anywherev1alpha1.Image{}
bundleManifestArtifacts := map[string]anywherev1alpha1.Manifest{}
artifactHashes := []string{}
for _, componentName := range sortedComponentNames {
for _, artifact := range vsphereBundleArtifacts[componentName] {
if artifact.Image != nil {
imageArtifact := artifact.Image
if componentName == "cluster-api-provider-vsphere" {
sourceBranch = imageArtifact.SourcedFromBranch
}
bundleImageArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleImageArtifact
artifactHashes = append(artifactHashes, bundleImageArtifact.ImageDigest)
}
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
bundleManifestArtifact := anywherev1alpha1.Manifest{
URI: manifestArtifact.ReleaseCdnURI,
}
bundleManifestArtifacts[manifestArtifact.ReleaseName] = bundleManifestArtifact
manifestHash, err := version.GenerateManifestHash(r, manifestArtifact)
if err != nil {
return anywherev1alpha1.VSphereBundle{}, err
}
artifactHashes = append(artifactHashes, manifestHash)
}
}
}
vSphereCloudProviderArtifacts := r.BundleArtifactsTable[fmt.Sprintf("cloud-provider-vsphere-%s", eksDReleaseChannel)]
for _, artifact := range vSphereCloudProviderArtifacts {
imageArtifact := artifact.Image
bundleArtifact := anywherev1alpha1.Image{
Name: imageArtifact.AssetName,
Description: fmt.Sprintf("Container image for %s image", imageArtifact.AssetName),
OS: imageArtifact.OS,
Arch: imageArtifact.Arch,
URI: imageArtifact.ReleaseImageURI,
ImageDigest: imageDigests[imageArtifact.ReleaseImageURI],
}
bundleImageArtifacts[imageArtifact.AssetName] = bundleArtifact
artifactHashes = append(artifactHashes, bundleArtifact.ImageDigest)
}
if r.DryRun {
componentChecksum = version.FakeComponentChecksum
} else {
componentChecksum = version.GenerateComponentHash(artifactHashes, r.DryRun)
}
version, err := version.BuildComponentVersion(
version.NewVersionerWithGITTAG(r.BuildRepoSource, constants.CapvProjectPath, sourceBranch, r),
componentChecksum,
)
if err != nil {
return anywherev1alpha1.VSphereBundle{}, errors.Wrapf(err, "Error getting version for cluster-api-provider-vsphere")
}
bundle := anywherev1alpha1.VSphereBundle{
Version: version,
ClusterAPIController: bundleImageArtifacts["cluster-api-provider-vsphere"],
KubeProxy: bundleImageArtifacts["kube-rbac-proxy"],
Manager: bundleImageArtifacts["cloud-provider-vsphere"],
KubeVip: bundleImageArtifacts["kube-vip"],
Components: bundleManifestArtifacts["infrastructure-components.yaml"],
ClusterTemplate: bundleManifestArtifacts["cluster-template.yaml"],
Metadata: bundleManifestArtifacts["metadata.yaml"],
}
return bundle, nil
}
| 123 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clients
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
ecrsdk "github.com/aws/aws-sdk-go/service/ecr"
ecrpublicsdk "github.com/aws/aws-sdk-go/service/ecrpublic"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
docker "github.com/fsouza/go-dockerclient"
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/release/pkg/aws/ecr"
"github.com/aws/eks-anywhere/release/pkg/aws/ecrpublic"
)
type SourceClients struct {
S3 *SourceS3Clients
ECR *SourceECRClient
}
type ReleaseClients struct {
S3 *ReleaseS3Clients
ECRPublic *ReleaseECRPublicClient
}
type SourceS3Clients struct {
Client *s3.S3
}
type ReleaseS3Clients struct {
Client *s3.S3
Uploader *s3manager.Uploader
}
type SourceECRClient struct {
EcrClient *ecrsdk.ECR
EcrPublicClient *ecrpublicsdk.ECRPublic
AuthConfig *docker.AuthConfiguration
}
type ReleaseECRPublicClient struct {
Client *ecrpublicsdk.ECRPublic
AuthConfig *docker.AuthConfiguration
}
// Function to create release clients for dev release.
func CreateDevReleaseClients(dryRun bool) (*SourceClients, *ReleaseClients, error) {
fmt.Println("\n==========================================================")
fmt.Println(" Dev Release Clients Creation")
fmt.Println("==========================================================")
if dryRun {
fmt.Println("Skipping clients creation in dry-run mode")
return nil, nil, nil
}
// PDX session for eks-a-build-prod-pdx
pdxSession, err := session.NewSession(&aws.Config{
Region: aws.String("us-west-2"),
})
if err != nil {
return nil, nil, errors.Cause(err)
}
	// IAD session (us-east-1)
iadSession, err := session.NewSession(&aws.Config{
Region: aws.String("us-east-1"),
})
if err != nil {
return nil, nil, errors.Cause(err)
}
// S3 client and uploader
s3Client := s3.New(pdxSession)
uploader := s3manager.NewUploader(pdxSession)
// Get source ECR auth config
ecrClient := ecrsdk.New(pdxSession)
sourceAuthConfig, err := ecr.GetAuthConfig(ecrClient)
if err != nil {
return nil, nil, errors.Cause(err)
}
// Get release ECR Public auth config
ecrPublicClient := ecrpublicsdk.New(iadSession)
releaseAuthConfig, err := ecrpublic.GetAuthConfig(ecrPublicClient)
if err != nil {
return nil, nil, errors.Cause(err)
}
// Constructing source clients
sourceClients := &SourceClients{
S3: &SourceS3Clients{
Client: s3Client,
},
ECR: &SourceECRClient{
EcrClient: ecrClient,
AuthConfig: sourceAuthConfig,
},
}
// Constructing release clients
releaseClients := &ReleaseClients{
S3: &ReleaseS3Clients{
Client: s3Client,
Uploader: uploader,
},
ECRPublic: &ReleaseECRPublicClient{
Client: ecrPublicClient,
AuthConfig: releaseAuthConfig,
},
}
return sourceClients, releaseClients, nil
}
// Function to create clients for staging release.
func CreateStagingReleaseClients() (*SourceClients, *ReleaseClients, error) {
fmt.Println("\n==========================================================")
fmt.Println(" Staging Release Clients Creation")
fmt.Println("==========================================================")
// Session for eks-a-build-prod-pdx
sourceSession, err := session.NewSessionWithOptions(session.Options{
Config: aws.Config{
Region: aws.String("us-west-2"),
},
})
if err != nil {
return nil, nil, errors.Cause(err)
}
// Session for eks-a-artifact-beta-iad
releaseSession, err := session.NewSessionWithOptions(session.Options{
Config: aws.Config{
Region: aws.String("us-east-1"),
},
Profile: "artifacts-staging",
})
if err != nil {
return nil, nil, errors.Cause(err)
}
// Source S3 client
sourceS3Client := s3.New(sourceSession)
// Release S3 client and uploader
releaseS3Client := s3.New(releaseSession)
uploader := s3manager.NewUploader(releaseSession)
// Get source ECR auth config
ecrClient := ecrsdk.New(sourceSession)
sourceAuthConfig, err := ecr.GetAuthConfig(ecrClient)
if err != nil {
return nil, nil, errors.Cause(err)
}
// Get release ECR Public auth config
ecrPublicClient := ecrpublicsdk.New(releaseSession)
releaseAuthConfig, err := ecrpublic.GetAuthConfig(ecrPublicClient)
if err != nil {
return nil, nil, errors.Cause(err)
}
// Constructing source clients
sourceClients := &SourceClients{
S3: &SourceS3Clients{
Client: sourceS3Client,
},
ECR: &SourceECRClient{
EcrClient: ecrClient,
AuthConfig: sourceAuthConfig,
},
}
// Constructing release clients
releaseClients := &ReleaseClients{
S3: &ReleaseS3Clients{
Client: releaseS3Client,
Uploader: uploader,
},
ECRPublic: &ReleaseECRPublicClient{
Client: ecrPublicClient,
AuthConfig: releaseAuthConfig,
},
}
return sourceClients, releaseClients, nil
}
// Function to create clients for production release.
func CreateProdReleaseClients() (*SourceClients, *ReleaseClients, error) {
fmt.Println("\n==========================================================")
fmt.Println(" Production Release Clients Creation")
fmt.Println("==========================================================")
// Session for eks-a-artifact-beta-iad
sourceSession, err := session.NewSessionWithOptions(session.Options{
Config: aws.Config{
Region: aws.String("us-east-1"),
},
Profile: "artifacts-staging",
})
if err != nil {
return nil, nil, errors.Cause(err)
}
// Session for eks-a-artifact-prod-iad
releaseSession, err := session.NewSessionWithOptions(session.Options{
Config: aws.Config{
Region: aws.String("us-east-1"),
},
Profile: "artifacts-production",
})
if err != nil {
return nil, nil, errors.Cause(err)
}
// Source S3 client
sourceS3Client := s3.New(sourceSession)
// Release S3 client and uploader
releaseS3Client := s3.New(releaseSession)
uploader := s3manager.NewUploader(releaseSession)
// Get source ECR Public auth config
sourceEcrPublicClient := ecrpublicsdk.New(sourceSession)
sourceAuthConfig, err := ecrpublic.GetAuthConfig(sourceEcrPublicClient)
if err != nil {
return nil, nil, errors.Cause(err)
}
// Get release ECR Public auth config
releaseEcrPublicClient := ecrpublicsdk.New(releaseSession)
releaseAuthConfig, err := ecrpublic.GetAuthConfig(releaseEcrPublicClient)
if err != nil {
return nil, nil, errors.Cause(err)
}
	// Constructing source clients
sourceClients := &SourceClients{
S3: &SourceS3Clients{
Client: sourceS3Client,
},
ECR: &SourceECRClient{
EcrPublicClient: sourceEcrPublicClient,
AuthConfig: sourceAuthConfig,
},
}
// Constructing release clients
releaseClients := &ReleaseClients{
S3: &ReleaseS3Clients{
Client: releaseS3Client,
Uploader: uploader,
},
ECRPublic: &ReleaseECRPublicClient{
Client: releaseEcrPublicClient,
AuthConfig: releaseAuthConfig,
},
}
return sourceClients, releaseClients, nil
}
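
// Illustrative sketch only: how a caller might choose between the three
// constructors above. The releaseType values here are assumptions for the
// example, not flags defined by the release tooling.
func exampleCreateClients(releaseType string, dryRun bool) (*SourceClients, *ReleaseClients, error) {
	switch releaseType {
	case "dev":
		return CreateDevReleaseClients(dryRun)
	case "staging":
		return CreateStagingReleaseClients()
	case "production":
		return CreateProdReleaseClients()
	default:
		return nil, nil, fmt.Errorf("unknown release type %q", releaseType)
	}
}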
| 281 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package constants
const (
// Artifacts-related constants.
ReleaseKind = "Release"
BundlesKind = "Bundles"
HexAlphabet = "0123456789abcdef"
SuccessIcon = "✅"
FakeComponentChecksum = "abcdef1"
FakeGitCommit = "0123456789abcdef0123456789abcdef01234567"
ReleaseFolderName = "release"
EksDReleaseComponentsUrl = "https://distro.eks.amazonaws.com/crds/releases.distro.eks.amazonaws.com-v1alpha1.yaml"
YamlSeparator = "\n---\n"
// Project paths.
CapasProjectPath = "projects/aws/cluster-api-provider-aws-snow"
CapcProjectPath = "projects/kubernetes-sigs/cluster-api-provider-cloudstack"
CapiProjectPath = "projects/kubernetes-sigs/cluster-api"
CaptProjectPath = "projects/tinkerbell/cluster-api-provider-tinkerbell"
CapvProjectPath = "projects/kubernetes-sigs/cluster-api-provider-vsphere"
CapxProjectPath = "projects/nutanix-cloud-native/cluster-api-provider-nutanix"
CertManagerProjectPath = "projects/cert-manager/cert-manager"
CiliumProjectPath = "projects/cilium/cilium"
EtcdadmBootstrapProviderProjectPath = "projects/aws/etcdadm-bootstrap-provider"
EtcdadmControllerProjectPath = "projects/aws/etcdadm-controller"
FluxcdRootPath = "projects/fluxcd"
Flux2ProjectPath = "projects/fluxcd/flux2"
HookProjectPath = "projects/tinkerbell/hook"
ImageBuilderProjectPath = "projects/kubernetes-sigs/image-builder"
KindProjectPath = "projects/kubernetes-sigs/kind"
KubeRbacProxyProjectPath = "projects/brancz/kube-rbac-proxy"
PackagesProjectPath = "projects/aws/eks-anywhere-packages"
// Date format with standard reference time values
// The reference time used is the specific time stamp:
//
// 01/02 03:04:05PM '06 -0700
//
// (January 2, 15:04:05, 2006, in time zone seven hours west of GMT).
YYYYMMDD = "2006-01-02"
)
| 56 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filereader
import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
k8syaml "sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/release/pkg/aws/s3"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/git"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
artifactutils "github.com/aws/eks-anywhere/release/pkg/util/artifacts"
)
type EksDLatestRelease struct {
Branch string `json:"branch"`
KubeVersion string `json:"kubeVersion"`
Number int `json:"number"`
Dev bool `json:"dev,omitempty"`
}
type EksDLatestReleases struct {
Releases []EksDLatestRelease `json:"releases"`
Latest string `json:"latest"`
}
func ReadShaSums(filename string, r *releasetypes.ReleaseConfig) (string, string, error) {
var sha256, sha512 string
var err error
if r.DryRun {
sha256, err = artifactutils.GetFakeSHA(256)
if err != nil {
return "", "", errors.Cause(err)
}
sha512, err = artifactutils.GetFakeSHA(512)
if err != nil {
return "", "", errors.Cause(err)
}
} else {
sha256Path := filename + ".sha256"
sha256, err = readShaFile(sha256Path)
if err != nil {
return "", "", errors.Cause(err)
}
sha512Path := filename + ".sha512"
sha512, err = readShaFile(sha512Path)
if err != nil {
return "", "", errors.Cause(err)
}
}
return sha256, sha512, nil
}
func readShaFile(filename string) (string, error) {
data, err := os.ReadFile(filename)
if err != nil {
return "", errors.Cause(err)
}
if parts := strings.Split(string(data), " "); len(parts) == 2 {
return parts[0], nil
}
return "", errors.Errorf("Error parsing shasum file %s", filename)
}
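
// Note (descriptive, based on the parsing above): readShaFile expects the
// checksum file to contain exactly "<hex-digest> <filename>" separated by a
// single space; any other layout is rejected with a parse error.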
func ReadFileContentsTrimmed(filename string) (string, error) {
data, err := os.ReadFile(filename)
if err != nil {
return "", errors.Cause(err)
}
return strings.TrimSpace(string(data)), nil
}
func ReadEksDReleases(r *releasetypes.ReleaseConfig) (*EksDLatestReleases, error) {
// Read the eks-d latest release file to get all the releases
eksDLatestReleases := &EksDLatestReleases{}
eksDReleaseFilePath := filepath.Join(r.BuildRepoSource, "EKSD_LATEST_RELEASES")
eksDReleaseFile, err := os.ReadFile(eksDReleaseFilePath)
if err != nil {
return nil, errors.Cause(err)
}
err = yaml.Unmarshal(eksDReleaseFile, eksDLatestReleases)
if err != nil {
return nil, errors.Cause(err)
}
return eksDLatestReleases, nil
}
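
// Illustrative shape of the EKSD_LATEST_RELEASES file unmarshalled above,
// matching the json tags on EksDLatestReleases (the values are made up):
//
//	releases:
//	- branch: 1-23
//	  kubeVersion: v1.23.7
//	  number: 5
//	  dev: false
//	latest: 1-23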
func GetSupportedK8sVersions(r *releasetypes.ReleaseConfig) ([]string, error) {
	// Read the supported release branches file to get all supported Kubernetes versions
releaseFilePath := filepath.Join(r.BuildRepoSource, constants.ReleaseFolderName, "SUPPORTED_RELEASE_BRANCHES")
releaseFile, err := os.ReadFile(releaseFilePath)
if err != nil {
return nil, errors.Cause(err)
}
supportedK8sVersions := strings.Split(strings.TrimRight(string(releaseFile), "\n"), "\n")
return supportedK8sVersions, nil
}
func GetBottlerocketSupportedK8sVersionsByFormat(r *releasetypes.ReleaseConfig, imageFormat string) ([]string, error) {
if r.DryRun {
return []string{"1-21", "1-22", "1-23", "1-24"}, nil
}
	// Read the Bottlerocket releases file to determine which Kubernetes versions are supported for this image format
var bottlerocketReleaseMap map[string]interface{}
var bottlerocketSupportedK8sVersions []string
bottlerocketReleasesFilename := "BOTTLEROCKET_RELEASES"
bottlerocketReleasesFilePath := filepath.Join(r.BuildRepoSource, constants.ImageBuilderProjectPath, bottlerocketReleasesFilename)
bottlerocketReleasesFileContents, err := os.ReadFile(bottlerocketReleasesFilePath)
if err != nil {
return nil, errors.Cause(err)
}
err = yaml.Unmarshal(bottlerocketReleasesFileContents, &bottlerocketReleaseMap)
if err != nil {
return nil, errors.Cause(err)
}
for channel := range bottlerocketReleaseMap {
// new format for BR releases file
releaseVersionByFormat := bottlerocketReleaseMap[channel].(map[string]interface{})[fmt.Sprintf("%s-release-version", imageFormat)]
if releaseVersionByFormat != nil {
bottlerocketSupportedK8sVersions = append(bottlerocketSupportedK8sVersions, channel)
}
}
return bottlerocketSupportedK8sVersions, nil
}
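
// Illustrative shape of the BOTTLEROCKET_RELEASES file read above (values are
// made up): each release channel maps image formats to a
// "<imageFormat>-release-version" entry, and a channel counts as supported for
// a format only when that entry is present.
//
//	1-23:
//	  ami-release-version: 1.13.1
//	  ova-release-version: 1.13.1
//	  raw-release-version: 1.13.1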
func GetBottlerocketContainerMetadata(r *releasetypes.ReleaseConfig, filename string) (string, string, error) {
var bottlerocketContainerMetadataMap map[string]interface{}
bottlerocketContainerMetadataFilePath := filepath.Join(r.BuildRepoSource, constants.ImageBuilderProjectPath, filename)
metadata, err := os.ReadFile(bottlerocketContainerMetadataFilePath)
if err != nil {
return "", "", errors.Cause(err)
}
err = yaml.Unmarshal(metadata, &bottlerocketContainerMetadataMap)
if err != nil {
return "", "", errors.Cause(err)
}
tag, imageDigest := bottlerocketContainerMetadataMap["tag"].(string), bottlerocketContainerMetadataMap["imageDigest"].(string)
return tag, imageDigest, nil
}
func GetEksDReleaseManifestUrl(releaseChannel, releaseNumber string, dev bool) string {
if dev {
return fmt.Sprintf("https://eks-d-postsubmit-artifacts.s3.us-west-2.amazonaws.com/kubernetes-%[1]s/kubernetes-%[1]s-eks-%s.yaml", releaseChannel, releaseNumber)
}
return fmt.Sprintf("https://distro.eks.amazonaws.com/kubernetes-%[1]s/kubernetes-%[1]s-eks-%s.yaml", releaseChannel, releaseNumber)
}
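
// Illustrative example (the helper name is hypothetical): for releaseChannel
// "1-23" and releaseNumber "5" the function above yields the production and
// dev manifest URLs shown in the comments below.
func exampleEksDReleaseManifestUrls() {
	// https://distro.eks.amazonaws.com/kubernetes-1-23/kubernetes-1-23-eks-5.yaml
	fmt.Println(GetEksDReleaseManifestUrl("1-23", "5", false))
	// https://eks-d-postsubmit-artifacts.s3.us-west-2.amazonaws.com/kubernetes-1-23/kubernetes-1-23-eks-5.yaml
	fmt.Println(GetEksDReleaseManifestUrl("1-23", "5", true))
}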
// GetNextEksADevBuildNumber computes next eksa dev build number for the current eks-a dev build
func GetNextEksADevBuildNumber(releaseVersion string, r *releasetypes.ReleaseConfig) (int, error) {
tempFileName := "latest-dev-release-version"
var latestReleaseKey, latestBuildVersion string
var currentEksaBuildNumber int
if r.BuildRepoBranchName == "main" {
latestReleaseKey = "LATEST_RELEASE_VERSION"
} else {
latestReleaseKey = fmt.Sprintf("%s/LATEST_RELEASE_VERSION", r.BuildRepoBranchName)
}
if s3.KeyExists(r.ReleaseBucket, latestReleaseKey) {
err := s3.DownloadFile(tempFileName, r.ReleaseBucket, latestReleaseKey)
if err != nil {
return -1, errors.Cause(err)
}
// Check if current version and latest version are the same semver
latestBuildS3, err := os.ReadFile(tempFileName)
if err != nil {
return -1, errors.Cause(err)
}
latestBuildVersion = string(latestBuildS3)
if releaseVersion == "vDev" { // TODO: remove when we update the pipeline
releaseVersion = "v0.0.0"
}
currentEksaBuildNumber, err = NewBuildNumberFromLastVersion(latestBuildVersion, releaseVersion, r.BuildRepoBranchName)
if err != nil {
return -1, errors.Cause(err)
}
} else {
currentEksaBuildNumber = 0
}
return currentEksaBuildNumber, nil
}
// NewBuildNumberFromLastVersion bumps the build number for eksa dev build version if found
func NewBuildNumberFromLastVersion(latestEksaBuildVersion, releaseVersion, branchName string) (int, error) {
if releaseVersion == "vDev" { // TODO: remove when we update the pipeline
releaseVersion = "v0.0.0"
}
if !strings.Contains(latestEksaBuildVersion, releaseVersion) && !strings.Contains(latestEksaBuildVersion, "vDev") { // TODO: adding vDev case temporally to support old run, remove later
// different semver, reset build number suffix on release version
return 0, nil
}
// Same semver, only bump build number suffix on release version
i := strings.LastIndex(latestEksaBuildVersion, ".")
if i == -1 || i >= len(latestEksaBuildVersion)-1 {
return -1, fmt.Errorf("invalid dev release version found for latest release: %s", latestEksaBuildVersion)
}
lastBuildNumber, err := strconv.Atoi(latestEksaBuildVersion[i+1:])
if err != nil {
return -1, fmt.Errorf("invalid dev release version found for latest release [%s]: %v", latestEksaBuildVersion, err)
}
newBuildNumber := lastBuildNumber + 1
return newBuildNumber, nil
}
// GetCurrentEksADevReleaseVersion computes the dev release version string for the current EKS-A dev release.
func GetCurrentEksADevReleaseVersion(releaseVersion string, r *releasetypes.ReleaseConfig, buildNumber int) (string, error) {
fmt.Println("\n==========================================================")
fmt.Println(" Dev Release Version Computation")
fmt.Println("==========================================================")
if releaseVersion == "vDev" { // TODO: remove when we update the pipeline
releaseVersion = "v0.0.0"
}
releaseVersionIdentifier := "dev+build"
if r.BuildRepoBranchName != "main" {
releaseVersionIdentifier = fmt.Sprintf("dev-%s+build", r.BuildRepoBranchName)
}
var newDevReleaseVersion string
if r.Weekly {
newDevReleaseVersion = fmt.Sprintf("v0.0.0-%s.%s", releaseVersionIdentifier, r.ReleaseDate)
} else {
newDevReleaseVersion = fmt.Sprintf("v0.0.0-%s.%d", releaseVersionIdentifier, buildNumber)
}
fmt.Printf("New dev release release version: %s\n", newDevReleaseVersion)
fmt.Printf("%s Successfully computed current dev release version\n", constants.SuccessIcon)
return newDevReleaseVersion, nil
}
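// Illustrative sketch (build numbers and dates are placeholders, not from the original
// source): the dev release versions computed above take the shape
//
//	main branch, per-build:  v0.0.0-dev+build.7
//	release-0.16 branch:     v0.0.0-dev-release-0.16+build.7
//	weekly release:          v0.0.0-dev+build.<ReleaseDate>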
// PutEksAReleaseVersion writes the given release version to the latest release version file and uploads it to the release bucket.
func PutEksAReleaseVersion(version string, r *releasetypes.ReleaseConfig) error {
var currentReleaseKey string
if r.BuildRepoBranchName == "main" {
currentReleaseKey = "LATEST_RELEASE_VERSION"
} else {
currentReleaseKey = fmt.Sprintf("%s/LATEST_RELEASE_VERSION", r.BuildRepoBranchName)
}
err := os.MkdirAll(filepath.Dir(currentReleaseKey), 0o755)
if err != nil {
return errors.Cause(err)
}
f, err := os.Create(currentReleaseKey)
if err != nil {
return errors.Cause(err)
}
defer os.Remove(f.Name())
versionByteArr := []byte(version)
if _, err = f.Write(versionByteArr); err != nil {
return errors.Cause(err)
}
// Upload the file to S3
fmt.Println("Uploading latest release version file")
err = s3.UploadFile(currentReleaseKey, aws.String(r.ReleaseBucket), aws.String(currentReleaseKey), r.ReleaseClients.S3.Uploader)
if err != nil {
return errors.Cause(err)
}
return nil
}
func GetEksdRelease(eksdReleaseURL string) (*eksdv1alpha1.Release, error) {
content, err := ReadHttpFile(eksdReleaseURL)
if err != nil {
return nil, err
}
eksd := &eksdv1alpha1.Release{}
if err = k8syaml.UnmarshalStrict(content, eksd); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal eksd manifest")
}
return eksd, nil
}
// GetEksdReleaseValues retrieves the release number and dev flag for an EKS Distro release entry, as sourced from https://github.com/aws/eks-anywhere-build-tooling/blob/main/EKSD_LATEST_RELEASES
func GetEksdReleaseValues(release interface{}) (string, bool) {
releaseNumber := release.(map[interface{}]interface{})["number"]
releaseNumberInt := releaseNumber.(int)
releaseNumberStr := strconv.Itoa(releaseNumberInt)
dev := false
devValue := release.(map[interface{}]interface{})["dev"]
if devValue != nil && devValue.(bool) {
dev = true
}
return releaseNumberStr, dev
}
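// Illustrative sketch (values are placeholders, not from the original source): each release
// entry passed to GetEksdReleaseValues is expected to carry at least the "number" and "dev"
// keys, e.g. in YAML form
//
//	number: 4
//	dev: false
//
// for which the function returns ("4", false).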
func ReadHttpFile(uri string) ([]byte, error) {
resp, err := http.Get(uri)
if err != nil {
return nil, errors.Wrapf(err, "failed reading file from url [%s]", uri)
}
defer resp.Body.Close()
data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, errors.Wrapf(err, "failed reading file from url [%s]", uri)
}
return data, nil
}
func ReadGitTag(projectPath, gitRootPath, branch string) (string, error) {
_, err := git.CheckoutRepo(gitRootPath, branch)
if err != nil {
return "", fmt.Errorf("error reading git tag: %v", err)
}
tagFile := filepath.Join(gitRootPath, projectPath, "GIT_TAG")
gitTag, err := ReadFileContentsTrimmed(tagFile)
if err != nil {
return "", fmt.Errorf("error reading git tag: %v", err)
}
return gitTag, nil
}
| 357 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filereader
import (
"testing"
)
func TestNewBuildNumberFromLastVersion(t *testing.T) {
testCases := []struct {
testName string
latestBuildVersion string
releaseVersion string
branch string
want int
}{
{
testName: "vDev release with latest v0.0.0",
latestBuildVersion: "v0.0.0-dev+build.5",
releaseVersion: "vDev",
branch: "main",
want: 6,
},
{
testName: "v0.0.0 release with latest v0.0.0",
latestBuildVersion: "v0.0.0-dev+build.68",
releaseVersion: "v0.0.0",
branch: "main",
want: 69,
},
{
testName: "different semver",
latestBuildVersion: "v0.0.0-dev+build.5",
releaseVersion: "v0.0.1",
branch: "main",
want: 0,
},
{
testName: "vDev release with latest v0.0.0, non-main",
latestBuildVersion: "v0.0.0-dev-v1beta+build.5",
releaseVersion: "vDev",
branch: "v1beta1",
want: 6,
},
{
testName: "v0.0.0 release with latest v0.0.0, non-main branch",
latestBuildVersion: "v0.0.0-dev-v1beta1+build.0",
releaseVersion: "v0.0.0",
branch: "v1beta1",
want: 1,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
if got, err := NewBuildNumberFromLastVersion(tt.latestBuildVersion, tt.releaseVersion, tt.branch); err != nil {
t.Fatalf("NewBuildNumberFromLastVersion err = %s, want err = nil", err)
} else if got != tt.want {
t.Fatalf("NewBuildNumberFromLastVersion version = %d, want %d", got, tt.want)
}
})
}
}
| 76 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package git
import (
"fmt"
"os/exec"
commandutils "github.com/aws/eks-anywhere/release/pkg/util/command"
)
func CloneRepo(cloneUrl, destination string) (string, error) {
cloneRepoCommandSequence := fmt.Sprintf("git clone --depth 1 %s %[2]s; cd %[2]s; git config --unset-all remote.origin.fetch; git config --add remote.origin.fetch '+refs/heads/*:refs/remotes/origin/*'; git fetch --unshallow; git pull --all", cloneUrl, destination)
cmd := exec.Command("bash", "-c", cloneRepoCommandSequence)
return commandutils.ExecCommand(cmd)
}
func CheckoutRepo(gitRoot, branch string) (string, error) {
cmd := exec.Command("git", "-C", gitRoot, "checkout", branch)
return commandutils.ExecCommand(cmd)
}
func DescribeTag(gitRoot string) (string, error) {
cmd := exec.Command("git", "-C", gitRoot, "describe", "--tag")
return commandutils.ExecCommand(cmd)
}
func GetRepoTagsDescending(gitRoot string) (string, error) {
cmd := exec.Command("git", "-C", gitRoot, "tag", "-l", "v*", "--sort", "-v:refname")
return commandutils.ExecCommand(cmd)
}
func GetHead(gitRoot string) (string, error) {
cmd := exec.Command("git", "-C", gitRoot, "rev-parse", "HEAD")
return commandutils.ExecCommand(cmd)
}
func GetRepoRoot() (string, error) {
cmd := exec.Command("git", "rev-parse", "--show-toplevel")
return commandutils.ExecCommand(cmd)
}
| 54 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helm
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
docker "github.com/fsouza/go-dockerclient"
"github.com/go-logr/logr"
"github.com/pkg/errors"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/registry"
"k8s.io/helm/pkg/chartutil"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/yaml"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
commandutils "github.com/aws/eks-anywhere/release/pkg/util/command"
)
var HelmLog = ctrl.Log.WithName("HelmLog")
// helmDriver implements PackageDriver to install packages from Helm charts.
type helmDriver struct {
cfg *action.Configuration
log logr.Logger
settings *cli.EnvSettings
}
func NewHelm() (*helmDriver, error) {
settings := cli.New()
client, err := registry.NewClient()
if err != nil {
return nil, fmt.Errorf("creating registry client while initializing helm driver: %w", err)
}
cfg := &action.Configuration{RegistryClient: client}
err = cfg.Init(settings.RESTClientGetter(), settings.Namespace(),
os.Getenv("HELM_DRIVER"), helmLog(HelmLog))
if err != nil {
return nil, fmt.Errorf("initializing helm driver: %w", err)
}
return &helmDriver{
cfg: cfg,
log: HelmLog,
settings: settings,
}, nil
}
// GetHelmDest logs into the source registry, pulls the given helm chart, and untars it into the working directory, returning the path to the unpacked chart.
func GetHelmDest(d *helmDriver, r *releasetypes.ReleaseConfig, ReleaseImageURI, assetName string) (string, error) {
var chartPath string
var err error
err = d.HelmRegistryLogin(r, "source")
if err != nil {
return "", fmt.Errorf("logging into the source registry: %w", err)
}
helmChart := strings.Split(ReleaseImageURI, ":")
fmt.Printf("Starting to modifying helm chart %s\n", helmChart[0])
fmt.Printf("Pulling helm chart %s\n", ReleaseImageURI)
chartPath, err = d.PullHelmChart(helmChart[0], helmChart[1])
if err != nil {
return "", fmt.Errorf("pulling the helm chart: %w", err)
}
err = d.HelmRegistryLogout(r, "source")
if err != nil {
return "", fmt.Errorf("logging out of the source registry: %w", err)
}
	pwd, err := os.Getwd()
	if err != nil {
		return "", fmt.Errorf("getting current working dir: %w", err)
	}
	dest := filepath.Join(pwd, assetName)
fmt.Printf("Untar helm chart %s into %s\n", chartPath, dest)
err = UnTarHelmChart(chartPath, assetName, dest)
if err != nil {
return "", fmt.Errorf("untar the helm chart: %w", err)
}
helmDest := filepath.Join(pwd, assetName, assetName)
return helmDest, nil
}
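// A hypothetical usage sketch for GetHelmDest (registry, chart URI, and asset name are
// illustrative, not from the original source):
//
//	d, _ := NewHelm()
//	helmDest, err := GetHelmDest(d, releaseConfig,
//		"public.ecr.aws/xyz/eks-anywhere-packages:0.3.9-helm", "eks-anywhere-packages")
//	// On success, helmDest is <pwd>/eks-anywhere-packages/eks-anywhere-packages,
//	// the untarred chart directory inside the current working directory.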
func GetChartImageTags(d *helmDriver, helmDest string) (*Requires, error) {
f, err := HasRequires(helmDest)
if err != nil {
return &Requires{}, fmt.Errorf("finding the requires.yaml: %w", err)
}
helmRequires, err := ValidateHelmRequires(f)
if err != nil {
return &Requires{}, fmt.Errorf("turning requires.yaml to struct: %w", err)
}
return helmRequires, nil
}
// ModifyAndPushChartYaml updates the chart version (and, for the packages chart, the image tags and digests in values.yaml), re-packages the chart, and pushes it to the release registry.
func ModifyAndPushChartYaml(i releasetypes.ImageArtifact, r *releasetypes.ReleaseConfig, d *helmDriver, helmDest string, eksArtifacts map[string][]releasetypes.Artifact, shaMap map[string]anywherev1alpha1.Image) error {
helmChart := strings.Split(i.ReleaseImageURI, ":")
helmtag := helmChart[1]
// Overwrite Chart.yaml
fmt.Printf("Checking inside helm chart for Chart.yaml %s\n", helmDest)
chart, err := HasChart(helmDest)
if err != nil {
return fmt.Errorf("finding the Chart.yaml: %w", err)
}
chartYaml, err := ValidateHelmChart(chart)
if err != nil {
return fmt.Errorf("turning Chart.yaml to struct: %w", err)
}
chartYaml.Version = helmtag
fmt.Printf("Overwriting helm chart.yaml version to new tag %s\n", chartYaml.Version)
err = OverwriteChartYaml(fmt.Sprintf("%s/%s", helmDest, "Chart.yaml"), chartYaml)
if err != nil {
return fmt.Errorf("overwriting the Chart.yaml version: %w", err)
}
	// If the chart is the packages chart, we find the image tag values and override them in the values.yaml.
if strings.Contains(helmDest, "eks-anywhere-packages") {
		imageTagMap, err := GetPackagesImageTags(eksArtifacts)
		if err != nil {
			return fmt.Errorf("getting packages image tags: %w", err)
		}
		fmt.Printf("Overwriting helm values.yaml version to new image tags %v\n", imageTagMap)
		err = OverWriteChartValuesImageTag(helmDest, imageTagMap)
if err != nil {
return fmt.Errorf("overwriting the values.yaml version: %w", err)
}
if shaMap != nil {
fmt.Printf("Overwriting helm values.yaml image shas to new image shas for Dev Release %v\n", shaMap)
err = OverWriteChartValuesImageSha(helmDest, shaMap)
if err != nil {
return fmt.Errorf("overwriting the values.yaml version: %w", err)
}
}
}
fmt.Printf("Re-Packaging modified helm chart %s\n", helmDest)
packaged, err := PackageHelmChart(helmDest)
if err != nil {
return fmt.Errorf("packaging the helm chart: %w", err)
}
fmt.Printf("Pushing modified helm chart %s to %s\n", packaged, r.ReleaseContainerRegistry)
err = d.HelmRegistryLogin(r, "destination")
if err != nil {
return fmt.Errorf("logging into the destination registry: %w", err)
}
err = PushHelmChart(packaged, filepath.Dir(helmChart[0]))
if err != nil {
return fmt.Errorf("pushing the helm chart: %w", err)
}
err = d.HelmRegistryLogout(r, "destination")
if err != nil {
return fmt.Errorf("logging out of the destination registry: %w", err)
}
return nil
}
func (d *helmDriver) HelmRegistryLogin(r *releasetypes.ReleaseConfig, remoteType string) error {
var authConfig *docker.AuthConfiguration
var remote string
if remoteType == "source" {
authConfig = r.SourceClients.ECR.AuthConfig
remote = r.SourceContainerRegistry
} else if remoteType == "destination" {
authConfig = r.ReleaseClients.ECRPublic.AuthConfig
remote = r.ReleaseContainerRegistry
}
login := action.NewRegistryLogin(d.cfg)
err := login.Run(os.Stdout, remote, authConfig.Username, authConfig.Password, false)
if err != nil {
return fmt.Errorf("running the Helm registry login command: %w", err)
}
return nil
}
func (d *helmDriver) HelmRegistryLogout(r *releasetypes.ReleaseConfig, remoteType string) error {
var remote string
if remoteType == "source" {
remote = r.SourceContainerRegistry
} else if remoteType == "destination" {
remote = r.ReleaseContainerRegistry
}
logout := action.NewRegistryLogout(d.cfg)
err := logout.Run(os.Stdout, remote)
if err != nil {
return fmt.Errorf("running the Helm registry logout command: %w", err)
}
return nil
}
// PullHelmChart will take in a remote Helm URI and attempt to pull down the chart if it exists.
func (d *helmDriver) PullHelmChart(name, version string) (string, error) {
if name == "" || version == "" {
return "", fmt.Errorf("empty input for PullHelmChart, check flags")
}
install := action.NewInstall(d.cfg)
install.ChartPathOptions.Version = version
if !strings.HasPrefix(name, "oci://") {
name = fmt.Sprintf("oci://%s", name)
}
chartPath, err := install.LocateChart(name, d.settings)
if err != nil || chartPath == "" {
return "", fmt.Errorf("running the Helm LocateChart command, you might need run an AWS ECR Login: %w", err)
}
return chartPath, nil
}
// PushHelmChart will take in a packaged helm chart and push it to a remote URI.
func PushHelmChart(packaged, URI string) error {
if !strings.HasPrefix(URI, "oci://") {
URI = fmt.Sprintf("oci://%s", URI)
}
cmd := exec.Command("helm", "push", packaged, URI)
out, err := commandutils.ExecCommand(cmd)
fmt.Println(out)
if err != nil {
return fmt.Errorf("running Helm push command on URI %s: %v", URI, err)
}
return nil
}
// PackageHelmChart will package a dir into a helm chart.
func PackageHelmChart(dir string) (string, error) {
if dir == "" {
return "", fmt.Errorf("empty input for PackageHelmChart, check flags")
}
p := action.NewPackage()
vals := new(map[string]interface{})
packaged, err := p.Run(dir, *vals)
if err != nil || packaged == "" {
return "", fmt.Errorf("running the Helm Package command %w", err)
}
return packaged, nil
}
// helmLog wraps logr.Logger to make it compatible with helm's DebugLog.
func helmLog(log logr.Logger) action.DebugLog {
return func(template string, args ...interface{}) {
log.Info(fmt.Sprintf(template, args...))
}
}
// UnTarHelmChart will attempt to move the helm chart out of the helm cache by untarring it to the pwd and creating the directory structure to unpack it into.
func UnTarHelmChart(chartRef, chartPath, dest string) error {
if chartRef == "" || chartPath == "" || dest == "" {
return fmt.Errorf("Empty input value given for UnTarHelmChart")
}
_, err := os.Stat(dest)
if os.IsNotExist(err) {
if _, err := os.Stat(chartPath); err != nil {
if err := os.MkdirAll(chartPath, 0o755); err != nil {
return errors.Wrap(err, "failed to untar (mkdir)")
}
} else {
return errors.Errorf("failed to untar: a file or directory with the name %s already exists", dest)
}
} else {
if err != nil { // Checks directory check errors such as permission issues to read
return errors.Errorf("failed UnTarHelmChart: %s", err)
}
}
// Untar the files, and create the directory structure
return chartutil.ExpandFile(dest, chartRef)
}
// HasRequires checks for the existence of the requires.yaml within the helm directory.
func HasRequires(helmdir string) (string, error) {
requires := filepath.Join(helmdir, "requires.yaml")
info, err := os.Stat(requires)
if os.IsNotExist(err) {
return "", err
}
if info.IsDir() {
return "", fmt.Errorf("found Dir, not requires.yaml file")
}
return requires, nil
}
// ValidateHelmRequires runs the parse file into struct function, and validations.
func ValidateHelmRequires(fileName string) (*Requires, error) {
helmrequires := &Requires{}
err := parseHelmRequires(fileName, helmrequires)
if err != nil {
return nil, err
}
err = validateHelmRequiresContent(helmrequires)
if err != nil {
return nil, err
}
return helmrequires, err
}
// validateHelmRequiresContent loops over the validation tests.
func validateHelmRequiresContent(helmrequires *Requires) error {
for _, v := range helmRequiresValidations {
if err := v(helmrequires); err != nil {
return err
}
}
return nil
}
var helmRequiresValidations = []func(*Requires) error{
validateHelmRequiresName,
}
func validateHelmRequiresName(helmrequires *Requires) error {
err := helmrequires.validateHelmRequiresNotEmpty()
if err != nil {
return err
}
return nil
}
// validateHelmRequiresNotEmpty checks that it has at least one image in the spec.
func (helmrequires *Requires) validateHelmRequiresNotEmpty() error {
// Check if Projects are listed
if len(helmrequires.Spec.Images) < 1 {
return fmt.Errorf("should use non-empty list of images for requires")
}
return nil
}
// parseHelmRequires will attempt to unpack the requires.yaml into the Go struct `Requires`.
func parseHelmRequires(fileName string, helmrequires *Requires) error {
content, err := os.ReadFile(fileName)
if err != nil {
return fmt.Errorf("unable to read file due to: %v", err)
}
for _, c := range strings.Split(string(content), constants.YamlSeparator) {
if err = yaml.Unmarshal([]byte(c), helmrequires); err != nil {
return fmt.Errorf("unable to parse %s\nyaml: %s\n %v", fileName, string(c), err)
}
err = yaml.UnmarshalStrict([]byte(c), helmrequires)
if err != nil {
return fmt.Errorf("unable to UnmarshalStrict %v\nyaml: %s\n %v", helmrequires, string(c), err)
}
return nil
}
return fmt.Errorf("requires.yaml file [%s] is invalid or does not contain kind %v", fileName, helmrequires)
}
// Chart yaml functions
// HasChart checks for the existence of the Chart.yaml within the helm directory.
func HasChart(helmdir string) (string, error) {
requires := filepath.Join(helmdir, "Chart.yaml")
info, err := os.Stat(requires)
if os.IsNotExist(err) {
return "", err
}
if info.IsDir() {
return "", fmt.Errorf("found Dir, not Chart.yaml file")
}
return requires, nil
}
// ValidateHelmChart runs the parse file into struct function, and validations.
func ValidateHelmChart(fileName string) (*chart.Metadata, error) {
helmChart := &chart.Metadata{}
err := parseHelmChart(fileName, helmChart)
if err != nil {
return nil, err
}
return helmChart, err
}
// parseHelmChart will attempt to unpack the Chart.yaml into the Go struct `chart.Metadata`.
func parseHelmChart(fileName string, helmChart *chart.Metadata) error {
content, err := os.ReadFile(fileName)
if err != nil {
return fmt.Errorf("unable to read file due to: %v", err)
}
for _, c := range strings.Split(string(content), constants.YamlSeparator) {
if err = yaml.Unmarshal([]byte(c), helmChart); err != nil {
return fmt.Errorf("unable to parse %s\nyaml: %s\n %v", fileName, string(c), err)
}
err = yaml.UnmarshalStrict([]byte(c), helmChart)
if err != nil {
return fmt.Errorf("unable to UnmarshalStrict %v\nyaml: %s\n %v", helmChart, string(c), err)
}
return nil
}
return fmt.Errorf("Chart.yaml file [%s] is invalid or does not contain kind %v", fileName, helmChart)
}
func OverwriteChartYaml(filename string, helmChart *chart.Metadata) error {
yamlData, err := yaml.Marshal(&helmChart)
if err != nil {
return fmt.Errorf("unable to Marshal %v\nyamlData: %s\n %v", helmChart, yamlData, err)
}
err = os.WriteFile(filename, yamlData, 0o644)
if err != nil {
return err
}
return nil
}
func OverWriteChartValuesImageTag(filename string, tagMap map[string]string) error {
packagesURI := strings.Split(tagMap["eks-anywhere-packages"], ":")
refresherURI := strings.Split(tagMap["ecr-token-refresher"], ":")
valuesFile := filepath.Join(filename, "values.yaml")
values, err := chartutil.ReadValuesFile(valuesFile)
if err != nil {
return err
}
values["controller"].(map[string]interface{})["tag"] = packagesURI[len(packagesURI)-1]
values["cronjob"].(map[string]interface{})["tag"] = refresherURI[len(refresherURI)-1]
yamlData, err := yaml.Marshal(&values)
if err != nil {
return fmt.Errorf("unable to Marshal %v\nyamlData: %s\n %v", values, yamlData, err)
}
err = os.WriteFile(valuesFile, yamlData, 0o644)
if err != nil {
return err
}
return nil
}
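// Illustrative sketch (tags are placeholders, not from the original source): the values.yaml
// modified by OverWriteChartValuesImageTag is expected to contain controller.tag and
// cronjob.tag entries, e.g.
//
//	controller:
//	  tag: v0.3.9-eks-a-1
//	cronjob:
//	  tag: v0.3.9-eks-a-1
//
// which are overwritten with the tag portion of the eks-anywhere-packages and
// ecr-token-refresher release image URIs.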
func OverWriteChartValuesImageSha(filename string, shaMap map[string]anywherev1alpha1.Image) error {
valuesFile := filepath.Join(filename, "values.yaml")
values, err := chartutil.ReadValuesFile(valuesFile)
if err != nil {
return err
}
values["controller"].(map[string]interface{})["digest"] = shaMap["eks-anywhere-packages"].ImageDigest
values["cronjob"].(map[string]interface{})["digest"] = shaMap["ecr-token-refresher"].ImageDigest
yamlData, err := yaml.Marshal(&values)
if err != nil {
return fmt.Errorf("unable to Marshal %v\nyamlData: %s\n %v", values, yamlData, err)
}
err = os.WriteFile(valuesFile, yamlData, 0o644)
if err != nil {
return err
}
return nil
}
func GetPackagesImageTags(eksArtifacts map[string][]releasetypes.Artifact) (map[string]string, error) {
m := make(map[string]string)
for _, artifacts := range eksArtifacts {
for _, artifact := range artifacts {
if artifact.Image != nil {
if artifact.Image.AssetName == "eks-anywhere-packages" || artifact.Image.AssetName == "ecr-token-refresher" {
m[artifact.Image.AssetName] = artifact.Image.ReleaseImageURI
}
}
}
}
if len(m) == 0 {
return nil, fmt.Errorf("No assets found for eks-anywhere-packages, or ecr-token-refresher in eksArtifacts")
}
return m, nil
}
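// Illustrative sketch (URIs are placeholders, not from the original source): on success,
// GetPackagesImageTags returns a map of asset name to release image URI, e.g.
//
//	{
//		"eks-anywhere-packages": "public.ecr.aws/xyz/eks-anywhere-packages:v0.3.9-eks-a-1",
//		"ecr-token-refresher":   "public.ecr.aws/xyz/ecr-token-refresher:v0.3.9-eks-a-1",
//	}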
| 485 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helm
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type Requires struct {
Kind string `json:"kind,omitempty"`
Metadata metav1.ObjectMeta `json:"metadata,omitempty"`
Spec RequiresSpec `json:"spec,omitempty"`
}
type RequiresSpec struct {
Images []Image `json:"images,omitempty"`
Configurations []Configuration `json:"configurations,omitempty"`
Schema string `json:"schema,omitempty"`
}
type Configuration struct {
Name string `json:"name,omitempty"`
Required bool `json:"required,omitempty"`
Default string `json:"default,omitempty"`
}
type Image struct {
Repository string `json:"repository,omitempty"`
Tag string `json:"tag,omitempty"`
Digest string `json:"digest,omitempty"`
}
type DockerAuth struct {
Auths map[string]DockerAuthRegistry `json:"auths,omitempty"`
}
type DockerAuthRegistry struct {
Auth string `json:"auth"`
}
type DockerAuthFile struct {
Authfile string `json:"authfile"`
}
| 56 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package images
import (
"fmt"
"io"
"net/http"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
docker "github.com/fsouza/go-dockerclient"
"github.com/pkg/errors"
"sigs.k8s.io/yaml"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
assettypes "github.com/aws/eks-anywhere/release/pkg/assets/types"
"github.com/aws/eks-anywhere/release/pkg/aws/ecr"
"github.com/aws/eks-anywhere/release/pkg/aws/ecrpublic"
"github.com/aws/eks-anywhere/release/pkg/aws/s3"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/filereader"
"github.com/aws/eks-anywhere/release/pkg/retrier"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
artifactutils "github.com/aws/eks-anywhere/release/pkg/util/artifacts"
commandutils "github.com/aws/eks-anywhere/release/pkg/util/command"
)
// PollForExistence polls the container registry for the given image manifest until it is available or retries are exhausted.
func PollForExistence(devRelease bool, authConfig *docker.AuthConfiguration, imageUri, imageContainerRegistry, releaseEnvironment, branchName string) error {
repository, tag := artifactutils.SplitImageUri(imageUri, imageContainerRegistry)
var requestUrl string
if devRelease || releaseEnvironment == "development" {
requestUrl = fmt.Sprintf("https://%s:%s@%s/v2/%s/manifests/%s", authConfig.Username, authConfig.Password, imageContainerRegistry, repository, tag)
} else {
requestUrl = fmt.Sprintf("https://%s:%[email protected]/v2/%s/%s/manifests/%s", authConfig.Username, authConfig.Password, filepath.Base(imageContainerRegistry), repository, tag)
}
// Creating new GET request
req, err := http.NewRequest("GET", requestUrl, nil)
if err != nil {
return errors.Cause(err)
}
// Retrier for downloading source ECR images. This retrier has a max timeout of 60 minutes. It
	// checks whether the error that occurred during download is an ImageNotFound error and retries the
// download operation for a maximum of 60 retries, with a wait time of 30 seconds per retry.
retrier := retrier.NewRetrier(60*time.Minute, retrier.WithRetryPolicy(func(totalRetries int, err error) (retry bool, wait time.Duration) {
if branchName == "main" && artifactutils.IsImageNotFoundError(err) && totalRetries < 60 {
return true, 30 * time.Second
}
return false, 0
}))
err = retrier.Retry(func() error {
var err error
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
bodyStr := string(body)
if strings.Contains(bodyStr, "MANIFEST_UNKNOWN") {
return fmt.Errorf("Requested image not found")
}
return nil
})
if err != nil {
return fmt.Errorf("retries exhausted waiting for source image %s to be available for copy: %v", imageUri, err)
}
return nil
}
// CopyToDestination copies an image from the source registry to the release registry using skopeo, retrying on failure.
func CopyToDestination(sourceAuthConfig, releaseAuthConfig *docker.AuthConfiguration, sourceImageUri, releaseImageUri string) error {
retrier := retrier.NewRetrier(60*time.Minute, retrier.WithRetryPolicy(func(totalRetries int, err error) (retry bool, wait time.Duration) {
if err != nil && totalRetries < 10 {
return true, 30 * time.Second
}
return false, 0
}))
sourceRegistryUsername := sourceAuthConfig.Username
sourceRegistryPassword := sourceAuthConfig.Password
releaseRegistryUsername := releaseAuthConfig.Username
releaseRegistryPassword := releaseAuthConfig.Password
err := retrier.Retry(func() error {
cmd := exec.Command("skopeo", "copy", "--src-creds", fmt.Sprintf("%s:%s", sourceRegistryUsername, sourceRegistryPassword), "--dest-creds", fmt.Sprintf("%s:%s", releaseRegistryUsername, releaseRegistryPassword), fmt.Sprintf("docker://%s", sourceImageUri), fmt.Sprintf("docker://%s", releaseImageUri), "-f", "oci", "--all")
out, err := commandutils.ExecCommand(cmd)
fmt.Println(out)
if err != nil {
return fmt.Errorf("executing skopeo copy command: %v", err)
}
return nil
})
if err != nil {
return fmt.Errorf("retries exhausted performing image copy from source to destination: %v", err)
}
return nil
}
// GetSourceImageURI constructs the source image URI for the given image based on the release environment and image tag configuration, and returns the branch the image was sourced from.
func GetSourceImageURI(r *releasetypes.ReleaseConfig, name, repoName string, tagOptions map[string]string, imageTagConfiguration assettypes.ImageTagConfiguration, trimVersionSignifier, hasSeparateTagPerReleaseBranch bool) (string, string, error) {
var sourceImageUri string
var latestTag string
var err error
sourcedFromBranch := r.BuildRepoBranchName
if r.DevRelease || r.ReleaseEnvironment == "development" {
latestTag = artifactutils.GetLatestUploadDestination(r.BuildRepoBranchName)
if imageTagConfiguration.SourceLatestTagFromECR && !r.DryRun {
if (strings.Contains(name, "eks-anywhere-packages") || strings.Contains(name, "ecr-token-refresher")) && r.BuildRepoBranchName != "main" {
latestTag, _, err = ecr.FilterECRRepoByTagPrefix(r.SourceClients.ECR.EcrClient, repoName, "v0.0.0", false)
} else {
latestTag, err = ecr.GetLatestImageSha(r.SourceClients.ECR.EcrClient, repoName)
}
if err != nil {
return "", "", errors.Cause(err)
}
}
if imageTagConfiguration.NonProdSourceImageTagFormat != "" {
sourceImageTagPrefix := generateFormattedTagPrefix(imageTagConfiguration.NonProdSourceImageTagFormat, tagOptions)
sourceImageUri = fmt.Sprintf("%s/%s:%s-%s",
r.SourceContainerRegistry,
repoName,
sourceImageTagPrefix,
latestTag,
)
} else {
sourceImageUri = fmt.Sprintf("%s/%s:%s",
r.SourceContainerRegistry,
repoName,
latestTag,
)
}
if strings.HasSuffix(name, "-helm") || strings.HasSuffix(name, "-chart") {
sourceImageUri += "-helm"
}
if trimVersionSignifier {
sourceImageUri = strings.ReplaceAll(sourceImageUri, ":v", ":")
}
if !r.DryRun {
sourceEcrAuthConfig := r.SourceClients.ECR.AuthConfig
err := PollForExistence(r.DevRelease, sourceEcrAuthConfig, sourceImageUri, r.SourceContainerRegistry, r.ReleaseEnvironment, r.BuildRepoBranchName)
if err != nil {
if r.BuildRepoBranchName != "main" {
fmt.Printf("Tag corresponding to %s branch not found for %s image. Using image artifact from main\n", r.BuildRepoBranchName, repoName)
var gitTagFromMain string
if strings.Contains(name, "bottlerocket-bootstrap") {
gitTagFromMain = "non-existent"
} else {
gitTagPath := tagOptions["projectPath"]
if hasSeparateTagPerReleaseBranch {
gitTagPath = filepath.Join(tagOptions["projectPath"], tagOptions["eksDReleaseChannel"])
}
gitTagFromMain, err = filereader.ReadGitTag(gitTagPath, r.BuildRepoSource, "main")
if err != nil {
return "", "", errors.Cause(err)
}
}
sourceImageUri = strings.NewReplacer(r.BuildRepoBranchName, "latest", tagOptions["gitTag"], gitTagFromMain).Replace(sourceImageUri)
sourcedFromBranch = "main"
} else {
return "", "", errors.Cause(err)
}
}
}
} else if r.ReleaseEnvironment == "production" {
if imageTagConfiguration.ProdSourceImageTagFormat != "" {
sourceImageTagPrefix := generateFormattedTagPrefix(imageTagConfiguration.ProdSourceImageTagFormat, tagOptions)
sourceImageUri = fmt.Sprintf("%s/%s:%s-eks-a-%d",
r.SourceContainerRegistry,
repoName,
sourceImageTagPrefix,
r.BundleNumber,
)
} else {
sourceImageUri = fmt.Sprintf("%s/%s:%s-eks-a-%d",
r.SourceContainerRegistry,
repoName,
tagOptions["gitTag"],
r.BundleNumber,
)
}
if trimVersionSignifier {
sourceImageUri = strings.ReplaceAll(sourceImageUri, ":v", ":")
}
}
return sourceImageUri, sourcedFromBranch, nil
}
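// Illustrative sketch (registry, repository, and tag values are placeholders, not from the
// original source): the source image URIs built above take the shape
//
//	dev/development: <SourceContainerRegistry>/<repoName>:<optional tag prefix>-<latestTag>
//	production:      <SourceContainerRegistry>/<repoName>:<gitTag or tag prefix>-eks-a-<BundleNumber>
//
// e.g. 123456789012.dkr.ecr.us-west-2.amazonaws.com/kube-vip/kube-vip:v0.6.0-eks-a-42 for a
// production source image with git tag v0.6.0 and bundle number 42.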
// GetReleaseImageURI constructs the release (destination) image URI for the given image, including the appropriate release semver suffix.
func GetReleaseImageURI(r *releasetypes.ReleaseConfig, name, repoName string, tagOptions map[string]string, imageTagConfiguration assettypes.ImageTagConfiguration, trimVersionSignifier, hasSeparateTagPerReleaseBranch bool) (string, error) {
var releaseImageUri string
if imageTagConfiguration.ReleaseImageTagFormat != "" {
releaseImageTagPrefix := generateFormattedTagPrefix(imageTagConfiguration.ReleaseImageTagFormat, tagOptions)
releaseImageUri = fmt.Sprintf("%s/%s:%s-eks-a",
r.ReleaseContainerRegistry,
repoName,
releaseImageTagPrefix,
)
} else {
releaseImageUri = fmt.Sprintf("%s/%s:%s-eks-a",
r.ReleaseContainerRegistry,
repoName,
tagOptions["gitTag"],
)
}
var semver string
if r.DevRelease {
if r.Weekly {
semver = r.DevReleaseUriVersion
} else {
currentSourceImageUri, _, err := GetSourceImageURI(r, name, repoName, tagOptions, imageTagConfiguration, trimVersionSignifier, hasSeparateTagPerReleaseBranch)
if err != nil {
return "", errors.Cause(err)
}
previousReleaseImageSemver, err := GetPreviousReleaseImageSemver(r, releaseImageUri)
if err != nil {
return "", errors.Cause(err)
}
if previousReleaseImageSemver == "" {
semver = r.DevReleaseUriVersion
} else {
fmt.Printf("Previous release image semver for %s image: %s\n", repoName, previousReleaseImageSemver)
previousReleaseImageUri := fmt.Sprintf("%s-%s", releaseImageUri, previousReleaseImageSemver)
sameDigest, err := CompareHashWithPreviousBundle(r, currentSourceImageUri, previousReleaseImageUri)
if err != nil {
return "", errors.Cause(err)
}
if sameDigest {
semver = previousReleaseImageSemver
fmt.Printf("Image digest for %s image has not changed, tagging with previous dev release semver: %s\n", repoName, semver)
} else {
buildNumber, err := filereader.NewBuildNumberFromLastVersion(previousReleaseImageSemver, "vDev", r.BuildRepoBranchName)
if err != nil {
return "", err
}
newSemver, err := filereader.GetCurrentEksADevReleaseVersion("vDev", r, buildNumber)
if err != nil {
return "", err
}
semver = strings.ReplaceAll(newSemver, "+", "-")
fmt.Printf("Image digest for %s image has changed, tagging with new dev release semver: %s\n", repoName, semver)
}
}
}
} else {
semver = fmt.Sprintf("%d", r.BundleNumber)
}
releaseImageUri = fmt.Sprintf("%s-%s", releaseImageUri, semver)
if trimVersionSignifier {
releaseImageUri = strings.ReplaceAll(releaseImageUri, ":v", ":")
}
return releaseImageUri, nil
}
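// Illustrative sketch (values are placeholders, not from the original source): release image
// URIs built above take the shape
//
//	<ReleaseContainerRegistry>/<repoName>:<gitTag or tag prefix>-eks-a-<semver>
//
// where <semver> is the bundle number for production releases (e.g. ...:v0.6.0-eks-a-42) and a
// dev release semver such as v0.0.0-dev-build.123 for dev releases.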
func generateFormattedTagPrefix(imageTagFormat string, tagOptions map[string]string) string {
formattedTag := imageTagFormat
re := regexp.MustCompile(`<(\w+)>`)
searchResults := re.FindAllString(imageTagFormat, -1)
for _, result := range searchResults {
trimmedResult := strings.Trim(result, "<>")
formattedTag = strings.ReplaceAll(formattedTag, result, tagOptions[trimmedResult])
}
return formattedTag
}
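// Illustrative sketch (format and values are placeholders, not from the original source):
// given the tag format "<gitTag>-<eksDReleaseChannel>" and
// tagOptions{"gitTag": "v1.2.3", "eksDReleaseChannel": "1-27"}, generateFormattedTagPrefix
// returns "v1.2.3-1-27"; every <placeholder> is replaced with the matching tagOptions value.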
func CompareHashWithPreviousBundle(r *releasetypes.ReleaseConfig, currentSourceImageUri, previousReleaseImageUri string) (bool, error) {
if r.DryRun {
return false, nil
}
fmt.Printf("Comparing digests for [%s] and [%s]\n", currentSourceImageUri, previousReleaseImageUri)
currentSourceImageUriDigest, err := ecr.GetImageDigest(currentSourceImageUri, r.SourceContainerRegistry, r.SourceClients.ECR.EcrClient)
if err != nil {
return false, errors.Cause(err)
}
previousReleaseImageUriDigest, err := ecrpublic.GetImageDigest(previousReleaseImageUri, r.ReleaseContainerRegistry, r.ReleaseClients.ECRPublic.Client)
if err != nil {
return false, errors.Cause(err)
}
return currentSourceImageUriDigest == previousReleaseImageUriDigest, nil
}
// GetPreviousReleaseImageSemver returns the dev release semver the given release image was last tagged with in the existing bundle manifest, if any.
func GetPreviousReleaseImageSemver(r *releasetypes.ReleaseConfig, releaseImageUri string) (string, error) {
var semver string
if r.DryRun {
semver = "v0.0.0-dev-build.0"
} else {
bundles := &anywherev1alpha1.Bundles{}
bundleReleaseManifestKey := artifactutils.GetManifestFilepaths(r.DevRelease, r.Weekly, r.BundleNumber, constants.BundlesKind, r.BuildRepoBranchName, r.ReleaseDate)
bundleManifestUrl := fmt.Sprintf("https://%s.s3.amazonaws.com/%s", r.ReleaseBucket, bundleReleaseManifestKey)
if s3.KeyExists(r.ReleaseBucket, bundleReleaseManifestKey) {
contents, err := filereader.ReadHttpFile(bundleManifestUrl)
if err != nil {
return "", fmt.Errorf("Error reading bundle manifest from S3: %v", err)
}
if err = yaml.Unmarshal(contents, bundles); err != nil {
return "", fmt.Errorf("Error unmarshaling bundles manifest from [%s]: %v", bundleManifestUrl, err)
}
for _, versionedBundle := range bundles.Spec.VersionsBundles {
vbImages := versionedBundle.Images()
for _, image := range vbImages {
if strings.Contains(image.URI, releaseImageUri) {
imageUri := image.URI
var differential int
if r.BuildRepoBranchName == "main" {
differential = 1
} else {
differential = 2
}
numDashes := strings.Count(imageUri, "-")
splitIndex := numDashes - strings.Count(r.BuildRepoBranchName, "-") - differential
imageUriSplit := strings.SplitAfterN(imageUri, "-", splitIndex)
semver = imageUriSplit[len(imageUriSplit)-1]
}
}
}
}
}
return semver, nil
}
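// Illustrative sketch (image URI is a placeholder, not from the original source): for a
// main-branch dev release, an existing bundle image URI such as
//
//	public.ecr.aws/xyz/kube-vip/kube-vip:v0.6.0-eks-a-v0.0.0-dev-build.123
//
// yields semver "v0.0.0-dev-build.123" from the dash-counting split above, since the last
// element of the SplitAfterN result is the trailing dev-release suffix.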
| 355 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package operations
import (
"fmt"
"github.com/pkg/errors"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/assets"
"github.com/aws/eks-anywhere/release/pkg/aws/ecrpublic"
"github.com/aws/eks-anywhere/release/pkg/bundles"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/filereader"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
artifactutils "github.com/aws/eks-anywhere/release/pkg/util/artifacts"
)
func GenerateBundleArtifactsTable(r *releasetypes.ReleaseConfig) (map[string][]releasetypes.Artifact, error) {
fmt.Println("\n==========================================================")
fmt.Println(" Bundle Artifacts Table Generation")
fmt.Println("==========================================================")
eksDReleaseMap, err := filereader.ReadEksDReleases(r)
if err != nil {
return nil, err
}
supportedK8sVersions, err := filereader.GetSupportedK8sVersions(r)
if err != nil {
return nil, errors.Wrapf(err, "Error getting supported Kubernetes versions for bottlerocket")
}
artifactsTable, err := assets.GetBundleReleaseAssets(supportedK8sVersions, eksDReleaseMap, r)
if err != nil {
return nil, errors.Wrapf(err, "Error getting bundle release assets")
}
fmt.Printf("%s Successfully generated bundle artifacts table\n", constants.SuccessIcon)
return artifactsTable, nil
}
func BundleArtifactsRelease(r *releasetypes.ReleaseConfig) error {
fmt.Println("\n==========================================================")
fmt.Println(" Bundle Artifacts Release")
fmt.Println("==========================================================")
err := DownloadArtifacts(r, r.BundleArtifactsTable)
if err != nil {
return errors.Cause(err)
}
err = RenameArtifacts(r, r.BundleArtifactsTable)
if err != nil {
return errors.Cause(err)
}
err = UploadArtifacts(r, r.BundleArtifactsTable)
if err != nil {
return errors.Cause(err)
}
return nil
}
func GenerateImageDigestsTable(r *releasetypes.ReleaseConfig) (map[string]string, error) {
fmt.Println("\n==========================================================")
fmt.Println(" Image Digests Table Generation")
fmt.Println("==========================================================")
imageDigests := make(map[string]string)
for _, artifacts := range r.BundleArtifactsTable {
for _, artifact := range artifacts {
if artifact.Image != nil {
var imageDigestStr string
var err error
if r.DryRun {
sha256sum, err := artifactutils.GetFakeSHA(256)
if err != nil {
return nil, errors.Cause(err)
}
imageDigestStr = fmt.Sprintf("sha256:%s", sha256sum)
} else {
imageDigestStr, err = ecrpublic.GetImageDigest(artifact.Image.ReleaseImageURI, r.ReleaseContainerRegistry, r.ReleaseClients.ECRPublic.Client)
if err != nil {
return nil, errors.Cause(err)
}
}
imageDigests[artifact.Image.ReleaseImageURI] = imageDigestStr
fmt.Printf("Image digest for %s - %s\n", artifact.Image.ReleaseImageURI, imageDigestStr)
}
}
}
fmt.Printf("%s Successfully generated image digests table\n", constants.SuccessIcon)
return imageDigests, nil
}
func GenerateBundleSpec(r *releasetypes.ReleaseConfig, bundle *anywherev1alpha1.Bundles, imageDigests map[string]string) error {
fmt.Println("\n==========================================================")
fmt.Println(" Bundles Manifest Spec Generation")
fmt.Println("==========================================================")
versionsBundles, err := bundles.GetVersionsBundles(r, imageDigests)
if err != nil {
return err
}
bundle.Spec.VersionsBundles = versionsBundles
fmt.Printf("%s Successfully generated bundle manifest spec\n", constants.SuccessIcon)
return nil
}
| 127 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package operations
import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
anywherev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/bundles"
"github.com/aws/eks-anywhere/release/pkg/filereader"
"github.com/aws/eks-anywhere/release/pkg/git"
"github.com/aws/eks-anywhere/release/pkg/test"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
const (
releaseFolder = "release"
testdataFolder = "pkg/test/testdata"
generatedBundleFolder = "generated-bundles"
)
var releaseConfig = &releasetypes.ReleaseConfig{
CliRepoSource: "eks-a-build",
BuildRepoSource: "eks-a-cli",
CliRepoUrl: "https://github.com/aws/eks-anywhere.git",
BuildRepoUrl: "https://github.com/aws/eks-anywhere-build-tooling.git",
SourceBucket: "projectbuildpipeline-857-pipelineoutputartifactsb-10ajmk30khe3f",
ReleaseBucket: "release-bucket",
SourceContainerRegistry: "sourceContainerRegistry",
ReleaseContainerRegistry: "public.ecr.aws/release-container-registry",
CDN: "https://release-bucket",
BundleNumber: 1,
ReleaseNumber: 1,
ReleaseVersion: "vDev",
ReleaseTime: time.Unix(0, 0),
DevRelease: true,
DryRun: true,
}
var update = flag.Bool("update", false, "update the golden files of this test")
func TestGenerateBundleManifest(t *testing.T) {
testCases := []struct {
testName string
buildRepoBranchName string
cliRepoBranchName string
cliMinVersion string
cliMaxVersion string
}{
{
testName: "Dev-release from main",
buildRepoBranchName: "main",
cliRepoBranchName: "main",
cliMinVersion: "v0.16.0",
cliMaxVersion: "v0.16.0",
},
{
testName: "Dev-release from release-0.16",
buildRepoBranchName: "release-0.16",
cliRepoBranchName: "release-0.16",
cliMinVersion: "v0.16.0",
cliMaxVersion: "v0.16.0",
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
homeDir, err := os.UserHomeDir()
if err != nil {
t.Fatalf("Error getting home directory: %v\n", err)
}
parentSourceDir := filepath.Join(homeDir, "eks-a-source")
err = os.RemoveAll(parentSourceDir)
if err != nil {
t.Fatalf("Error removing source directory: %v\n", err)
}
gitRoot, err := git.GetRepoRoot()
if err != nil {
t.Fatalf("Error getting top-level Git directory: %v\n", err)
}
generatedBundlePath := filepath.Join(gitRoot, releaseFolder, generatedBundleFolder)
if err := os.MkdirAll(generatedBundlePath, 0o755); err != nil {
t.Fatalf("Error creating directory at %s for bundle generation: %v\n", generatedBundleFolder, err)
}
releaseConfig.BuildRepoBranchName = tt.buildRepoBranchName
releaseConfig.CliRepoBranchName = tt.cliRepoBranchName
releaseVersion, err := filereader.GetCurrentEksADevReleaseVersion(releaseConfig.ReleaseVersion, releaseConfig, 0)
if err != nil {
t.Fatalf("Error getting previous EKS-A dev release number: %v\n", err)
}
releaseConfig.ReleaseVersion = releaseVersion
releaseConfig.DevReleaseUriVersion = strings.ReplaceAll(releaseVersion, "+", "-")
err = os.RemoveAll(releaseConfig.ArtifactDir)
if err != nil {
t.Fatalf("Error removing local artifacts directory: %v\n", err)
}
err = SetRepoHeads(releaseConfig)
if err != nil {
t.Fatalf("Error getting heads of code repositories: %v\n", err)
}
bundleArtifactsTable, err := GenerateBundleArtifactsTable(releaseConfig)
if err != nil {
t.Fatalf("Error getting bundle artifacts data: %v\n", err)
}
releaseConfig.BundleArtifactsTable = bundleArtifactsTable
imageDigests, err := GenerateImageDigestsTable(releaseConfig)
if err != nil {
t.Fatalf("Error generating image digests table: %+v\n", err)
}
bundle := bundles.NewBaseBundles(releaseConfig)
bundle.Spec.CliMinVersion = tt.cliMinVersion
bundle.Spec.CliMaxVersion = tt.cliMaxVersion
err = GenerateBundleSpec(releaseConfig, bundle, imageDigests)
if err != nil {
t.Fatalf("Error generating bundles manifest: %+v\n", err)
}
bundleManifest, err := yaml.Marshal(bundle)
if err != nil {
t.Fatalf("Error marshaling bundles manifest: %+v\n", err)
}
expectedBundleManifestFile := filepath.Join(gitRoot, releaseFolder, testdataFolder, fmt.Sprintf("%s-bundle-release.yaml", tt.buildRepoBranchName))
generatedBundleManifestFile := filepath.Join(generatedBundlePath, fmt.Sprintf("%s-dry-run-bundle-release.yaml", tt.buildRepoBranchName))
err = os.WriteFile(generatedBundleManifestFile, bundleManifest, 0o644)
if err != nil {
t.Fatalf("Error writing bundles manifest file to disk: %v\n", err)
}
test.CheckFilesEquals(t, generatedBundleManifestFile, expectedBundleManifestFile, *update)
})
}
}
func TestReleaseConfigNewBundlesName(t *testing.T) {
testCases := []struct {
testName string
releaseConfig *releasetypes.ReleaseConfig
want string
}{
{
testName: "number 2",
releaseConfig: &releasetypes.ReleaseConfig{
BundleNumber: 2,
},
want: "bundles-2",
},
{
testName: "no bundle number",
releaseConfig: &releasetypes.ReleaseConfig{},
want: "bundles-0",
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
g.Expect(bundles.NewBundlesName(tt.releaseConfig)).To(Equal(tt.want))
})
}
}
func TestReleaseConfigNewBaseBundles(t *testing.T) {
g := NewWithT(t)
now := time.Now()
releaseConfig := &releasetypes.ReleaseConfig{
BundleNumber: 10,
ReleaseTime: now,
}
wantBundles := &anywherev1alpha1.Bundles{
TypeMeta: metav1.TypeMeta{
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
Kind: "Bundles",
},
ObjectMeta: metav1.ObjectMeta{
Name: "bundles-10",
CreationTimestamp: metav1.Time{Time: now},
},
Spec: anywherev1alpha1.BundlesSpec{
Number: 10,
},
}
g.Expect(bundles.NewBaseBundles(releaseConfig)).To(Equal(wantBundles))
}
| 221 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package operations
import (
"fmt"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/release/pkg/aws/s3"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/filereader"
"github.com/aws/eks-anywhere/release/pkg/retrier"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
artifactutils "github.com/aws/eks-anywhere/release/pkg/util/artifacts"
)
// DownloadArtifacts downloads the source archives, checksum files, and manifests for the given artifacts table from the source S3 bucket, falling back to main-branch artifacts when needed.
func DownloadArtifacts(r *releasetypes.ReleaseConfig, eksArtifacts map[string][]releasetypes.Artifact) error {
// Retrier for downloading source S3 objects. This retrier has a max timeout of 60 minutes. It
	// checks whether the error that occurred during download is an ObjectNotFound error and retries the
// download operation for a maximum of 60 retries, with a wait time of 30 seconds per retry.
s3Retrier := retrier.NewRetrier(60*time.Minute, retrier.WithRetryPolicy(func(totalRetries int, err error) (retry bool, wait time.Duration) {
if r.BuildRepoBranchName == "main" && artifactutils.IsObjectNotFoundError(err) && totalRetries < 60 {
return true, 30 * time.Second
}
return false, 0
}))
fmt.Println("==========================================================")
fmt.Println(" Artifacts Download")
fmt.Println("==========================================================")
for _, artifacts := range eksArtifacts {
for _, artifact := range artifacts {
// Check if there is an archive to be downloaded
if artifact.Archive != nil {
sourceS3Prefix := artifact.Archive.SourceS3Prefix
sourceS3Key := artifact.Archive.SourceS3Key
artifactPath := artifact.Archive.ArtifactPath
objectKey := filepath.Join(sourceS3Prefix, sourceS3Key)
objectLocalFilePath := filepath.Join(artifactPath, sourceS3Key)
fmt.Printf("Archive - %s\n", objectKey)
if r.DryRun && artifact.Archive.ImageFormat != "tarball" {
fmt.Println("Skipping OS image downloads in dry-run mode")
continue
}
err := s3Retrier.Retry(func() error {
if !s3.KeyExists(r.SourceBucket, objectKey) {
return fmt.Errorf("Requested object not found")
}
return nil
})
if err != nil {
if r.BuildRepoBranchName != "main" {
var latestSourceS3PrefixFromMain string
fmt.Printf("Artifact corresponding to %s branch not found for %s archive. Using artifact from main\n", r.BuildRepoBranchName, sourceS3Key)
if strings.Contains(sourceS3Key, "eksctl-anywhere") {
latestSourceS3PrefixFromMain = strings.NewReplacer(r.CliRepoBranchName, "latest").Replace(sourceS3Prefix)
} else {
gitTagFromMain, err := filereader.ReadGitTag(artifact.Archive.ProjectPath, r.BuildRepoSource, "main")
if err != nil {
return errors.Cause(err)
}
latestSourceS3PrefixFromMain = strings.NewReplacer(r.BuildRepoBranchName, "latest", artifact.Archive.GitTag, gitTagFromMain).Replace(sourceS3Prefix)
}
objectKey = filepath.Join(latestSourceS3PrefixFromMain, sourceS3Key)
} else {
return fmt.Errorf("retries exhausted waiting for archive to be uploaded to source location: %v", err)
}
}
err = s3.DownloadFile(objectLocalFilePath, r.SourceBucket, objectKey)
if err != nil {
return errors.Cause(err)
}
// Download checksum files for the archive
checksumExtensions := []string{
".sha256",
".sha512",
}
// Adding a special case for tinkerbell/hook project.
// The project builds linux kernel files that are not stored as tarballs and currently do not have SHA checksums.
// TODO(pokearu): Add logic to generate SHA for hook project
if artifact.Archive.ProjectPath == constants.HookProjectPath {
checksumExtensions = []string{}
}
for _, extension := range checksumExtensions {
objectShasumFileName := fmt.Sprintf("%s%s", sourceS3Key, extension)
objectShasumFileKey := filepath.Join(sourceS3Prefix, objectShasumFileName)
objectShasumFileLocalFilePath := filepath.Join(artifactPath, objectShasumFileName)
fmt.Printf("Checksum file - %s\n", objectShasumFileKey)
err := s3Retrier.Retry(func() error {
if !s3.KeyExists(r.SourceBucket, objectShasumFileKey) {
return fmt.Errorf("Requested object not found")
}
return nil
})
if err != nil {
if r.BuildRepoBranchName != "main" {
var latestSourceS3PrefixFromMain string
fmt.Printf("Artifact corresponding to %s branch not found for %s archive. Using artifact from main\n", r.BuildRepoBranchName, sourceS3Key)
if strings.Contains(sourceS3Key, "eksctl-anywhere") {
latestSourceS3PrefixFromMain = strings.NewReplacer(r.CliRepoBranchName, "latest").Replace(sourceS3Prefix)
} else {
gitTagFromMain, err := filereader.ReadGitTag(artifact.Archive.ProjectPath, r.BuildRepoSource, "main")
if err != nil {
return errors.Cause(err)
}
latestSourceS3PrefixFromMain = strings.NewReplacer(r.BuildRepoBranchName, "latest", artifact.Archive.GitTag, gitTagFromMain).Replace(sourceS3Prefix)
}
objectShasumFileKey = filepath.Join(latestSourceS3PrefixFromMain, objectShasumFileName)
} else {
return fmt.Errorf("retries exhausted waiting for checksum file to be uploaded to source location: %v", err)
}
}
err = s3.DownloadFile(objectShasumFileLocalFilePath, r.SourceBucket, objectShasumFileKey)
if err != nil {
return errors.Cause(err)
}
}
}
// Check if there is a manifest to be downloaded
if artifact.Manifest != nil {
sourceS3Prefix := artifact.Manifest.SourceS3Prefix
sourceS3Key := artifact.Manifest.SourceS3Key
artifactPath := artifact.Manifest.ArtifactPath
objectKey := filepath.Join(sourceS3Prefix, sourceS3Key)
objectLocalFilePath := filepath.Join(artifactPath, sourceS3Key)
fmt.Printf("Manifest - %s\n", objectKey)
err := s3Retrier.Retry(func() error {
if !s3.KeyExists(r.SourceBucket, objectKey) {
return fmt.Errorf("Requested object not found")
}
return nil
})
if err != nil {
if r.BuildRepoBranchName != "main" {
fmt.Printf("Artifact corresponding to %s branch not found for %s manifest. Using artifact from main\n", r.BuildRepoBranchName, sourceS3Key)
gitTagFromMain, err := filereader.ReadGitTag(artifact.Manifest.ProjectPath, r.BuildRepoSource, "main")
if err != nil {
return errors.Cause(err)
}
latestSourceS3PrefixFromMain := strings.NewReplacer(r.BuildRepoBranchName, "latest", artifact.Manifest.GitTag, gitTagFromMain).Replace(sourceS3Prefix)
objectKey = filepath.Join(latestSourceS3PrefixFromMain, sourceS3Key)
} else {
return fmt.Errorf("retries exhausted waiting for archive to be uploaded to source location: %v", err)
}
}
err = s3.DownloadFile(objectLocalFilePath, r.SourceBucket, objectKey)
if err != nil {
return errors.Cause(err)
}
}
}
}
fmt.Printf("%s Successfully downloaded artifacts\n", constants.SuccessIcon)
return nil
}
| 183 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package operations
import (
"fmt"
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/release/pkg/bundles"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
func GenerateEksAArtifactsTable(r *releasetypes.ReleaseConfig) (map[string][]releasetypes.Artifact, error) {
fmt.Println("\n==========================================================")
fmt.Println(" EKS-A Artifacts Table Generation")
fmt.Println("==========================================================")
artifactsTable := map[string][]releasetypes.Artifact{}
artifacts, err := bundles.GetEksACliArtifacts(r)
if err != nil {
return nil, errors.Wrapf(err, "Error getting artifact information for EKS-A CLI")
}
artifactsTable["eks-a-cli"] = artifacts
fmt.Printf("%s Successfully generated EKS-A artifacts table\n", constants.SuccessIcon)
return artifactsTable, nil
}
func EksAArtifactsRelease(r *releasetypes.ReleaseConfig) error {
fmt.Println("\n==========================================================")
fmt.Println(" EKS-A CLI Artifacts Release")
fmt.Println("==========================================================")
err := DownloadArtifacts(r, r.EksAArtifactsTable)
if err != nil {
return errors.Cause(err)
}
err = RenameArtifacts(r, r.EksAArtifactsTable)
if err != nil {
return errors.Cause(err)
}
err = UploadArtifacts(r, r.EksAArtifactsTable)
if err != nil {
return errors.Cause(err)
}
return nil
}
| 66 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package operations
import (
"fmt"
"os"
"path/filepath"
"regexp"
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/release/pkg/constants"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
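// RenameArtifacts renames downloaded archive, checksum, and manifest files to their
// release names and overrides image references in manifests with their release URIs.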
func RenameArtifacts(r *releasetypes.ReleaseConfig, artifacts map[string][]releasetypes.Artifact) error {
fmt.Println("\n==========================================================")
fmt.Println(" Artifacts Rename")
fmt.Println("==========================================================")
for _, artifactsList := range artifacts {
for _, artifact := range artifactsList {
// Change the name of the archive along with the checksum files
if artifact.Archive != nil {
if r.DryRun && artifact.Archive.ImageFormat != "tarball" {
fmt.Println("Skipping OS image renames in dry-run mode")
continue
}
archiveArtifact := artifact.Archive
oldArtifactFile := filepath.Join(archiveArtifact.ArtifactPath, archiveArtifact.SourceS3Key)
newArtifactFile := filepath.Join(archiveArtifact.ArtifactPath, archiveArtifact.ReleaseName)
fmt.Printf("Renaming archive - %s\n", newArtifactFile)
err := os.Rename(oldArtifactFile, newArtifactFile)
if err != nil {
return errors.Cause(err)
}
// Change the names of the checksum files
checksumExtensions := []string{".sha256", ".sha512"}
// Adding a special case for tinkerbell/hook project.
// The project builds linux kernel files that are not stored as tarballs and currently do not have SHA checksums.
// TODO(pokearu): Add logic to generate SHA for hook project
if artifact.Archive.ProjectPath == constants.HookProjectPath {
checksumExtensions = []string{}
}
for _, extension := range checksumExtensions {
oldChecksumFile := oldArtifactFile + extension
newChecksumFile := newArtifactFile + extension
fmt.Printf("Renaming checksum file - %s\n", newChecksumFile)
err = os.Rename(oldChecksumFile, newChecksumFile)
if err != nil {
return errors.Cause(err)
}
}
}
// Override images in the manifest with release URIs
if artifact.Manifest != nil {
manifestArtifact := artifact.Manifest
oldArtifactFile := filepath.Join(manifestArtifact.ArtifactPath, manifestArtifact.SourceS3Key)
newArtifactFile := filepath.Join(manifestArtifact.ArtifactPath, manifestArtifact.ReleaseName)
fmt.Printf("Renaming manifest - %s\n", newArtifactFile)
err := os.Rename(oldArtifactFile, newArtifactFile)
if err != nil {
return errors.Cause(err)
}
for _, imageTagOverride := range manifestArtifact.ImageTagOverrides {
manifestFileContents, err := os.ReadFile(newArtifactFile)
if err != nil {
return errors.Cause(err)
}
regex := fmt.Sprintf("%s/.*%s.*", r.SourceContainerRegistry, imageTagOverride.Repository)
compiledRegex := regexp.MustCompile(regex)
fmt.Printf("Overriding image to %s in manifest %s\n", imageTagOverride.ReleaseUri, newArtifactFile)
updatedManifestFileContents := compiledRegex.ReplaceAllString(string(manifestFileContents), imageTagOverride.ReleaseUri)
err = os.WriteFile(newArtifactFile, []byte(updatedManifestFileContents), 0o644)
if err != nil {
return errors.Cause(err)
}
}
}
}
}
fmt.Printf("%s Successfully renamed artifacts\n", constants.SuccessIcon)
return nil
}
| 104 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package operations
import (
"fmt"
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/git"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
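// SetRepoHeads clones the CLI and build-tooling repositories, checks out the
// configured branches, and records the HEAD commit of each repo on the release config.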
func SetRepoHeads(r *releasetypes.ReleaseConfig) error {
fmt.Println("\n==========================================================")
fmt.Println(" Local Repository Setup")
fmt.Println("==========================================================")
// Get the repos from env var
if r.CliRepoUrl == "" || r.BuildRepoUrl == "" {
return fmt.Errorf("One or both clone URLs are empty")
}
homeDir, err := os.UserHomeDir()
if err != nil {
return errors.Cause(err)
}
parentSourceDir := filepath.Join(homeDir, "eks-a-source")
// Clone the CLI repository
fmt.Println("Cloning CLI repository")
r.CliRepoSource = filepath.Join(parentSourceDir, "eks-a-cli")
out, err := git.CloneRepo(r.CliRepoUrl, r.CliRepoSource)
fmt.Println(out)
if err != nil {
return errors.Cause(err)
}
// Clone the build-tooling repository
fmt.Println("Cloning build-tooling repository")
r.BuildRepoSource = filepath.Join(parentSourceDir, "eks-a-build")
out, err = git.CloneRepo(r.BuildRepoUrl, r.BuildRepoSource)
fmt.Println(out)
if err != nil {
return errors.Cause(err)
}
if r.BuildRepoBranchName != "main" {
fmt.Printf("Checking out build-tooling repo at branch %s\n", r.BuildRepoBranchName)
out, err = git.CheckoutRepo(r.BuildRepoSource, r.BuildRepoBranchName)
fmt.Println(out)
if err != nil {
return errors.Cause(err)
}
}
if r.CliRepoBranchName != "main" {
fmt.Printf("Checking out CLI repo at branch %s\n", r.CliRepoBranchName)
out, err = git.CheckoutRepo(r.CliRepoSource, r.CliRepoBranchName)
fmt.Println(out)
if err != nil {
return errors.Cause(err)
}
}
// Set HEADs of the repos
r.CliRepoHead, err = git.GetHead(r.CliRepoSource)
if err != nil {
return errors.Cause(err)
}
fmt.Printf("Head of cli repo: %s\n", r.CliRepoHead)
r.BuildRepoHead, err = git.GetHead(r.BuildRepoSource)
if err != nil {
return errors.Cause(err)
}
fmt.Printf("Head of build repo: %s\n", r.BuildRepoHead)
fmt.Printf("%s Successfully completed local repository setup\n", constants.SuccessIcon)
return nil
}
| 97 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package operations
import (
"fmt"
"path/filepath"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/release/pkg/aws/s3"
"github.com/aws/eks-anywhere/release/pkg/constants"
"github.com/aws/eks-anywhere/release/pkg/helm"
"github.com/aws/eks-anywhere/release/pkg/images"
releasetypes "github.com/aws/eks-anywhere/release/pkg/types"
)
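// UploadArtifacts uploads archive, checksum, and manifest files to the release S3
// bucket and copies container images to the release registry. Helm charts are
// repackaged and pushed with the Helm client instead of being copied directly.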
func UploadArtifacts(r *releasetypes.ReleaseConfig, eksArtifacts map[string][]releasetypes.Artifact) error {
fmt.Println("\n==========================================================")
fmt.Println(" Artifacts Upload")
fmt.Println("==========================================================")
if r.DryRun {
fmt.Println("Skipping artifacts upload in dry-run mode")
return nil
}
sourceEcrAuthConfig := r.SourceClients.ECR.AuthConfig
releaseEcrAuthConfig := r.ReleaseClients.ECRPublic.AuthConfig
for _, artifacts := range eksArtifacts {
for _, artifact := range artifacts {
if artifact.Archive != nil {
archiveFile := filepath.Join(artifact.Archive.ArtifactPath, artifact.Archive.ReleaseName)
fmt.Printf("Archive - %s\n", archiveFile)
key := filepath.Join(artifact.Archive.ReleaseS3Path, artifact.Archive.ReleaseName)
err := s3.UploadFile(archiveFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader)
if err != nil {
return errors.Cause(err)
}
checksumExtensions := []string{".sha256", ".sha512"}
// Adding a special case for tinkerbell/hook project.
// The project builds linux kernel files that are not stored as tarballs and currently do not have SHA checksums.
// TODO(pokearu): Add logic to generate SHA for hook project
if artifact.Archive.ProjectPath == constants.HookProjectPath {
checksumExtensions = []string{}
}
for _, extension := range checksumExtensions {
checksumFile := filepath.Join(artifact.Archive.ArtifactPath, artifact.Archive.ReleaseName) + extension
fmt.Printf("Checksum - %s\n", checksumFile)
key := filepath.Join(artifact.Archive.ReleaseS3Path, artifact.Archive.ReleaseName) + extension
err := s3.UploadFile(checksumFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader)
if err != nil {
return errors.Cause(err)
}
}
}
if artifact.Manifest != nil {
manifestFile := filepath.Join(artifact.Manifest.ArtifactPath, artifact.Manifest.ReleaseName)
fmt.Printf("Manifest - %s\n", manifestFile)
key := filepath.Join(artifact.Manifest.ReleaseS3Path, artifact.Manifest.ReleaseName)
err := s3.UploadFile(manifestFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader)
if err != nil {
return errors.Cause(err)
}
}
if artifact.Image != nil {
// If the artifact is a helm chart, skip the skopeo copy. Instead, modify the Chart.yaml to match the release tag
// and then use Helm package and push commands to upload the chart to ECR Public.
if !r.DryRun && ((strings.HasSuffix(artifact.Image.AssetName, "helm") && !r.DevRelease) || strings.HasSuffix(artifact.Image.AssetName, "chart")) {
// Trim the -helm suffix on the packages helm chart; the tinkerbell chart does not need trimming since its AssetName is the same as the repo name.
trimmedAsset := strings.TrimSuffix(artifact.Image.AssetName, "-helm")
helmDriver, err := helm.NewHelm()
if err != nil {
return fmt.Errorf("creating helm client: %v", err)
}
fmt.Printf("Modifying helm chart for %s\n", trimmedAsset)
helmDest, err := helm.GetHelmDest(helmDriver, r, artifact.Image.SourceImageURI, trimmedAsset)
if err != nil {
return fmt.Errorf("getting Helm destination: %v", err)
}
fmt.Printf("Pulled helm chart locally to %s\n", helmDest)
err = helm.ModifyAndPushChartYaml(*artifact.Image, r, helmDriver, helmDest, eksArtifacts, nil)
if err != nil {
return fmt.Errorf("modifying Chart.yaml and pushing Helm chart to destination: %v", err)
}
continue
}
sourceImageUri := artifact.Image.SourceImageURI
releaseImageUri := artifact.Image.ReleaseImageURI
fmt.Printf("Source Image - %s\n", sourceImageUri)
fmt.Printf("Destination Image - %s\n", releaseImageUri)
err := images.CopyToDestination(sourceEcrAuthConfig, releaseEcrAuthConfig, sourceImageUri, releaseImageUri)
if err != nil {
return fmt.Errorf("copying image from source to destination: %v", err)
}
}
}
}
fmt.Printf("%s Successfully uploaded artifacts\n", constants.SuccessIcon)
return nil
}
| 125 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package retrier
import (
"fmt"
"time"
)
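// Retrier retries an operation until it succeeds, a global timeout is reached,
// or the configured retry policy aborts the execution.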
type Retrier struct {
retryPolicy RetryPolicy
timeout time.Duration
}
type (
// RetryPolicy allows customizing the retry logic. The boolean retry indicates whether a new retry
// should be performed, and the wait duration indicates the wait time before the next retry.
RetryPolicy func(totalRetries int, err error) (retry bool, wait time.Duration)
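// RetrierOpt allows customizing a Retrier built with NewRetrier.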
RetrierOpt func(*Retrier)
)
// NewRetrier creates a new retrier with a global timeout (the maximum time allowed for the whole execution).
// The default retry policy is to always retry with no wait time in between retries.
func NewRetrier(timeout time.Duration, opts ...RetrierOpt) *Retrier {
r := &Retrier{
timeout: timeout,
retryPolicy: zeroWaitPolicy,
}
for _, opt := range opts {
opt(r)
}
return r
}
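// WithRetryPolicy sets the retry policy used by the retrier built with NewRetrier.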
func WithRetryPolicy(policy RetryPolicy) RetrierOpt {
return func(r *Retrier) {
r.retryPolicy = policy
}
}
// Retry runs the fn function until it either completes successfully (no error),
// the configured timeout is reached, or the retry policy aborts the execution.
func (r *Retrier) Retry(fn func() error) error {
start := time.Now()
retries := 0
var err error
for retry := true; retry; retry = time.Since(start) < r.timeout {
err = fn()
retries++
if err == nil {
fmt.Printf("Retry execution successful with %d retries in duration %v\n", retries, time.Since(start))
return nil
}
fmt.Printf("Error happened during retry after %d retries: %v\n", retries, err)
retry, wait := r.retryPolicy(retries, err)
if !retry {
fmt.Println("Execution aborted by retry policy")
return err
}
fmt.Printf("Sleeping before next retry: duration - %v\n", wait)
time.Sleep(wait)
}
fmt.Printf("Timeout reached after %d retries in duration %v. Returning error: %v\n", retries, time.Since(start), err)
return err
}
func zeroWaitPolicy(_ int, _ error) (retry bool, wait time.Duration) {
return true, 0
}
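// Illustrative usage sketch (an assumption, not part of the original file): retry a
// flaky operation for up to 5 minutes, waiting 30 seconds between attempts and giving
// up after 10 tries. doSomethingFlaky is a hypothetical func() error placeholder.
//
//	r := NewRetrier(5*time.Minute, WithRetryPolicy(func(totalRetries int, err error) (bool, time.Duration) {
//		return totalRetries < 10, 30 * time.Second
//	}))
//	if err := r.Retry(doSomethingFlaky); err != nil {
//		// handle the error once retries are exhausted or the timeout is hit
//	}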
| 87 |
eks-anywhere | aws | Go | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"os"
"os/exec"
"testing"
commandutils "github.com/aws/eks-anywhere/release/pkg/util/command"
)
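// CheckFilesEquals compares the contents of the files at actualPath and expectedPath
// and fails the test if they differ; when update is true, the expected file is first
// overwritten with the actual content.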
func CheckFilesEquals(t *testing.T, actualPath, expectedPath string, update bool) {
t.Helper()
actualContent, err := readFile(actualPath)
if err != nil {
t.Fatalf("Error reading actual path %s:\n%v", actualPath, err)
}
if update {
err = os.WriteFile(expectedPath, []byte(actualContent), 0o644)
if err != nil {
t.Fatalf("Error updating testdata bundle: %v\n", err)
}
}
expectedContent, err := readFile(expectedPath)
if err != nil {
t.Fatalf("Error reading expected path %s:\n%v", expectedPath, err)
}
if actualContent != expectedContent {
diffCmd := exec.Command("diff", expectedPath, actualPath)
diff, err := commandutils.ExecCommand(diffCmd)
if err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
if exitError.ExitCode() == 1 {
t.Fatalf("Actual file differs from expected:\n%s", string(diff))
}
}
}
t.Fatalf("Actual and expected files are different, actual =\n %s \n expected =\n %s\n%s", actualContent, expectedContent, err)
}
}
func readFile(filepath string) (string, error) {
data, err := os.ReadFile(filepath)
if err != nil {
return "", err
}
return string(data), nil
}
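// Illustrative usage sketch (an assumption, not part of the original file): compare a
// generated bundle manifest against checked-in testdata, regenerating the expected
// file when an update flag is set. The paths and flag below are hypothetical.
//
//	CheckFilesEquals(t, "output/bundle-release.yaml", "testdata/expected-bundle-release.yaml", *updateFlag)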
| 65 |