repo_name | repo_creator | programming_language | code | num_lines
---|---|---|---|---
eks-anywhere | aws | Go | package tinkerbell_test
import (
"errors"
"net"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/onsi/gomega"
"github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1"
"github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
eksav1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/networkutils/mocks"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestAssertMachineConfigsValid_ValidSucceeds(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
g.Expect(tinkerbell.AssertMachineConfigsValid(clusterSpec)).To(gomega.Succeed())
}
func TestAssertMachineConfigsValid_InvalidFails(t *testing.T) {
// Each case mutates the spec to invalidate a different machine config field.
for name, mutate := range map[string]func(*tinkerbell.ClusterSpec){
"MissingName": func(clusterSpec *tinkerbell.ClusterSpec) {
clusterSpec.ControlPlaneMachineConfig().Name = ""
},
"MissingHardwareSelector": func(clusterSpec *tinkerbell.ClusterSpec) {
clusterSpec.ControlPlaneMachineConfig().Spec.HardwareSelector = map[string]string{}
},
"MultipleKeyValuePairsInHardwareSelector": func(clusterSpec *tinkerbell.ClusterSpec) {
clusterSpec.ControlPlaneMachineConfig().Spec.HardwareSelector = map[string]string{
"foo": "bar",
"baz": "qux",
}
},
"MissingUsers": func(clusterSpec *tinkerbell.ClusterSpec) {
clusterSpec.ControlPlaneMachineConfig().Spec.Users = []eksav1alpha1.UserConfiguration{}
},
} {
t.Run(name, func(t *testing.T) {
g := gomega.NewWithT(t)
spec := NewDefaultValidClusterSpecBuilder().Build()
mutate(spec)
g.Expect(tinkerbell.AssertMachineConfigsValid(spec)).ToNot(gomega.Succeed())
})
}
}
func TestAssertDatacenterConfigValid_ValidSucceeds(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
g.Expect(tinkerbell.AssertDatacenterConfigValid(clusterSpec)).To(gomega.Succeed())
}
func TestAssertDatacenterConfigValid_InvalidFails(t *testing.T) {
g := gomega.NewWithT(t)
for name, mutate := range map[string]func(*tinkerbell.ClusterSpec){
"NoObjectName": func(c *tinkerbell.ClusterSpec) {
c.DatacenterConfig.ObjectMeta.Name = ""
},
"NoTinkerbellIP": func(c *tinkerbell.ClusterSpec) {
c.DatacenterConfig.Spec.TinkerbellIP = ""
},
"TinkerbellIPInvalid": func(c *tinkerbell.ClusterSpec) {
c.DatacenterConfig.Spec.TinkerbellIP = "invalid"
},
} {
t.Run(name, func(t *testing.T) {
cluster := NewDefaultValidClusterSpecBuilder().Build()
mutate(cluster)
g.Expect(tinkerbell.AssertDatacenterConfigValid(cluster)).ToNot(gomega.Succeed())
})
}
}
func TestAssertDatacenterConfigValidInvalidOSImageURL(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.DatacenterConfig.Spec.OSImageURL = "test"
g.Expect(tinkerbell.AssertDatacenterConfigValid(clusterSpec)).To(gomega.MatchError("parsing osImageOverride: parse \"test\": invalid URI for request"))
}
func TestAssertDatacenterConfigValidInvalidHookImagesURLPath(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.DatacenterConfig.Spec.HookImagesURLPath = "test"
g.Expect(tinkerbell.AssertDatacenterConfigValid(clusterSpec)).To(gomega.MatchError("parsing hookOverride: parse \"test\": invalid URI for request"))
}
func TestAssertMachineConfigNamespaceMatchesDatacenterConfig_Same(t *testing.T) {
g := gomega.NewWithT(t)
builder := NewDefaultValidClusterSpecBuilder()
clusterSpec := builder.Build()
err := tinkerbell.AssertMachineConfigNamespaceMatchesDatacenterConfig(clusterSpec)
g.Expect(err).To(gomega.Succeed())
}
func TestAssertMachineConfigNamespaceMatchesDatacenterConfig_Different(t *testing.T) {
g := gomega.NewWithT(t)
builder := NewDefaultValidClusterSpecBuilder()
clusterSpec := builder.Build()
// Invalidate the namespace check.
clusterSpec.MachineConfigs[builder.ControlPlaneMachineName].Namespace = "foo-bar"
err := tinkerbell.AssertMachineConfigNamespaceMatchesDatacenterConfig(clusterSpec)
g.Expect(err).ToNot(gomega.Succeed())
}
func TestAssertEtcdMachineRefExists_Exists(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
g.Expect(tinkerbell.AssertEtcdMachineRefExists(clusterSpec)).To(gomega.Succeed())
}
func TestAssertEtcdMachineRefExists_Missing(t *testing.T) {
g := gomega.NewWithT(t)
builder := NewDefaultValidClusterSpecBuilder()
clusterSpec := builder.Build()
delete(clusterSpec.MachineConfigs, builder.ExternalEtcdMachineName)
g.Expect(tinkerbell.AssertEtcdMachineRefExists(clusterSpec)).ToNot(gomega.Succeed())
}
func TestAssertWorkerNodeGroupMachineRefsExists_Exists(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
g.Expect(tinkerbell.AssertWorkerNodeGroupMachineRefsExists(clusterSpec)).To(gomega.Succeed())
}
func TestAssertK8SVersionNot120_Success(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Cluster.Spec.KubernetesVersion = eksav1alpha1.Kube123
g.Expect(tinkerbell.AssertK8SVersionNot120(clusterSpec)).Error().ShouldNot(gomega.HaveOccurred())
}
func TestAssertK8SVersionNot120_Error(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Cluster.Spec.KubernetesVersion = eksav1alpha1.Kube120
g.Expect(tinkerbell.AssertK8SVersionNot120(clusterSpec)).Error().Should(gomega.HaveOccurred())
}
func TestAssertWorkerNodeGroupMachineRefsExists_Missing(t *testing.T) {
g := gomega.NewWithT(t)
builder := NewDefaultValidClusterSpecBuilder()
clusterSpec := builder.Build()
delete(clusterSpec.MachineConfigs, builder.WorkerNodeGroupMachineName)
g.Expect(tinkerbell.AssertWorkerNodeGroupMachineRefsExists(clusterSpec)).ToNot(gomega.Succeed())
}
func TestAssertEtcdMachineRefExists_ExternalEtcdUnspecified(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
g.Expect(tinkerbell.AssertEtcdMachineRefExists(clusterSpec)).To(gomega.Succeed())
}
func TestNewIPNotInUseAssertion_NotInUseSucceeds(t *testing.T) {
g := gomega.NewWithT(t)
ctrl := gomock.NewController(t)
netClient := mocks.NewMockNetClient(ctrl)
netClient.EXPECT().
DialTimeout(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil, errors.New("failed to connect"))
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
assertion := tinkerbell.NewIPNotInUseAssertion(netClient)
g.Expect(assertion(clusterSpec)).To(gomega.Succeed())
}
func TestNewIPNotInUseAssertion_InUseFails(t *testing.T) {
g := gomega.NewWithT(t)
ctrl := gomock.NewController(t)
server, client := net.Pipe()
defer server.Close()
netClient := mocks.NewMockNetClient(ctrl)
netClient.EXPECT().
DialTimeout(gomock.Any(), gomock.Any(), gomock.Any()).
Return(client, nil)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
assertion := tinkerbell.NewIPNotInUseAssertion(netClient)
g.Expect(assertion(clusterSpec)).ToNot(gomega.Succeed())
}
func TestAssertTinkerbellIPNotInUse_NotInUseSucceeds(t *testing.T) {
g := gomega.NewWithT(t)
ctrl := gomock.NewController(t)
netClient := mocks.NewMockNetClient(ctrl)
netClient.EXPECT().
DialTimeout(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil, errors.New("failed to connect"))
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
assertion := tinkerbell.AssertTinkerbellIPNotInUse(netClient)
g.Expect(assertion(clusterSpec)).To(gomega.Succeed())
}
func TestAssertTinkerbellIPNotInUse_InUseFails(t *testing.T) {
g := gomega.NewWithT(t)
ctrl := gomock.NewController(t)
server, client := net.Pipe()
defer server.Close()
netClient := mocks.NewMockNetClient(ctrl)
netClient.EXPECT().
DialTimeout(gomock.Any(), gomock.Any(), gomock.Any()).
Return(client, nil)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
assertion := tinkerbell.AssertTinkerbellIPNotInUse(netClient)
g.Expect(assertion(clusterSpec)).ToNot(gomega.Succeed())
}
func TestAssertTinkerbellIPAndControlPlaneIPNotSame_DifferentSucceeds(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
g.Expect(tinkerbell.AssertTinkerbellIPAndControlPlaneIPNotSame(clusterSpec)).To(gomega.Succeed())
}
func TestAssertTinkerbellIPAndControlPlaneIPNotSame_SameFails(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.DatacenterConfig.Spec.TinkerbellIP = "1.1.1.1"
g.Expect(tinkerbell.AssertTinkerbellIPAndControlPlaneIPNotSame(clusterSpec)).ToNot(gomega.Succeed())
}
func TestAssertPortsNotInUse_Succeeds(t *testing.T) {
g := gomega.NewWithT(t)
ctrl := gomock.NewController(t)
netClient := mocks.NewMockNetClient(ctrl)
netClient.EXPECT().
DialTimeout("tcp", gomock.Any(), 500*time.Millisecond).
Times(3).
Return(nil, errors.New("failed to connect"))
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
assertion := tinkerbell.AssertPortsNotInUse(netClient)
g.Expect(assertion(clusterSpec)).To(gomega.Succeed())
}
func TestAssertPortsNotInUse_Fails(t *testing.T) {
g := gomega.NewWithT(t)
ctrl := gomock.NewController(t)
server, client := net.Pipe()
defer server.Close()
netClient := mocks.NewMockNetClient(ctrl)
netClient.EXPECT().
DialTimeout("tcp", gomock.Any(), 500*time.Millisecond).
Times(3).
Return(client, nil)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
assertion := tinkerbell.AssertPortsNotInUse(netClient)
g.Expect(assertion(clusterSpec)).ToNot(gomega.Succeed())
}
func TestAssertHookImageURLProxyNonAirgappedURLSuccess(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Cluster.Spec.ProxyConfiguration = &eksav1alpha1.ProxyConfiguration{
HttpProxy: "2.3.4.5",
HttpsProxy: "2.3.4.5",
}
clusterSpec.DatacenterConfig.Spec.HookImagesURLPath = "https://anywhere.eks.amazonaws.com/"
g.Expect(tinkerbell.AssertHookRetrievableWithoutProxy(clusterSpec)).To(gomega.Succeed())
}
func TestAssertHookRetrievableWithoutProxyURLNotProvided(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Cluster.Spec.ProxyConfiguration = &eksav1alpha1.ProxyConfiguration{
HttpProxy: "2.3.4.5",
HttpsProxy: "2.3.4.5",
}
g.Expect(tinkerbell.AssertHookRetrievableWithoutProxy(clusterSpec)).ToNot(gomega.Succeed())
}
func TestAssertHookRetrievableWithoutProxyURLUnreachable(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Cluster.Spec.ProxyConfiguration = &eksav1alpha1.ProxyConfiguration{
HttpProxy: "2.3.4.5",
HttpsProxy: "2.3.4.5",
}
clusterSpec.DatacenterConfig.Spec.HookImagesURLPath = "https://1.2.3.4"
g.Expect(tinkerbell.AssertHookRetrievableWithoutProxy(clusterSpec)).ToNot(gomega.Succeed())
}
func TestMinimumHardwareAvailableAssertionForCreate_SufficientSucceeds(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
catalogue := hardware.NewCatalogue()
// Add something for the control plane.
g.Expect(catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Labels: clusterSpec.ControlPlaneMachineConfig().Spec.HardwareSelector,
},
})).To(gomega.Succeed())
// Add something for external etcd.
g.Expect(catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Labels: clusterSpec.ExternalEtcdMachineConfig().Spec.HardwareSelector,
},
})).To(gomega.Succeed())
// Add something for the worker node group.
g.Expect(catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Labels: clusterSpec.WorkerNodeGroupMachineConfig(
clusterSpec.WorkerNodeGroupConfigurations()[0],
).Spec.HardwareSelector,
},
})).To(gomega.Succeed())
assertion := tinkerbell.MinimumHardwareAvailableAssertionForCreate(catalogue)
g.Expect(assertion(clusterSpec)).To(gomega.Succeed())
}
func TestMinimumHardwareAvailableAssertionForCreate_SufficientSucceedsWithoutExternalEtcd(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
catalogue := hardware.NewCatalogue()
// Add something to match the control plane selector.
g.Expect(catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Labels: clusterSpec.ControlPlaneMachineConfig().Spec.HardwareSelector,
},
})).To(gomega.Succeed())
// Add something for worker node group.
g.Expect(catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Labels: clusterSpec.WorkerNodeGroupMachineConfig(
clusterSpec.WorkerNodeGroupConfigurations()[0],
).Spec.HardwareSelector,
},
})).To(gomega.Succeed())
assertion := tinkerbell.MinimumHardwareAvailableAssertionForCreate(catalogue)
g.Expect(assertion(clusterSpec)).To(gomega.Succeed())
}
func TestMinimumHardwareAvailableAssertionForCreate_NoControlPlaneSelectorMatchesAnything(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.ControlPlaneMachineConfig().Spec.HardwareSelector = eksav1alpha1.HardwareSelector{}
clusterSpec.WorkerNodeGroupConfigurations()[0].Count = ptr.Int(0)
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
catalogue := hardware.NewCatalogue()
// Insert unlabelled hardware.
g.Expect(catalogue.InsertHardware(&v1alpha1.Hardware{})).To(gomega.Succeed())
assertion := tinkerbell.MinimumHardwareAvailableAssertionForCreate(catalogue)
g.Expect(assertion(clusterSpec)).ToNot(gomega.Succeed())
}
func TestMinimumHardwareAvailableAssertionForCreate_NoExternalEtcdSelectorMatchesAnything(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.ControlPlaneConfiguration().Count = 0
clusterSpec.WorkerNodeGroupConfigurations()[0].Count = ptr.Int(0)
clusterSpec.ExternalEtcdMachineConfig().Spec.HardwareSelector = eksav1alpha1.HardwareSelector{}
catalogue := hardware.NewCatalogue()
// Insert unlabelled hardware.
g.Expect(catalogue.InsertHardware(&v1alpha1.Hardware{})).To(gomega.Succeed())
assertion := tinkerbell.MinimumHardwareAvailableAssertionForCreate(catalogue)
g.Expect(assertion(clusterSpec)).ToNot(gomega.Succeed())
}
func TestMinimumHardwareAvailableAssertionForCreate_NoWorkerNodeGroupSelectorMatchesAnything(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.ControlPlaneConfiguration().Count = 0
nodeGroup := clusterSpec.WorkerNodeGroupMachineConfig(clusterSpec.WorkerNodeGroupConfigurations()[0])
nodeGroup.Spec.HardwareSelector = eksav1alpha1.HardwareSelector{}
clusterSpec.ExternalEtcdConfiguration().Count = 0
catalogue := hardware.NewCatalogue()
// Insert unlabelled hardware.
g.Expect(catalogue.InsertHardware(&v1alpha1.Hardware{})).To(gomega.Succeed())
assertion := tinkerbell.MinimumHardwareAvailableAssertionForCreate(catalogue)
g.Expect(assertion(clusterSpec)).ToNot(gomega.Succeed())
}
func TestMinimumHardwareAvailableAssertionForCreate_InsufficientFails(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
assertion := tinkerbell.MinimumHardwareAvailableAssertionForCreate(catalogue)
g.Expect(assertion(clusterSpec)).ToNot(gomega.Succeed())
}
func TestMinimumHardwareAvailableAssertionForCreate_InsufficientFailsWithoutExternalEtcd(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
assertion := tinkerbell.MinimumHardwareAvailableAssertionForCreate(catalogue)
g.Expect(assertion(clusterSpec)).ToNot(gomega.Succeed())
}
func TestValidatableClusterControlPlaneReplicaCount(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
validatableCluster := &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}
g.Expect(validatableCluster.ControlPlaneReplicaCount()).To(gomega.Equal(1))
}
func TestValidatableClusterWorkerNodeGroupConfigs(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
validatableCluster := &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}
workerConfigs := validatableCluster.WorkerNodeHardwareGroups()
g.Expect(workerConfigs[0].MachineDeploymentName).To(gomega.Equal("cluster-worker-node-group-0"))
g.Expect(workerConfigs[0].Replicas).To(gomega.Equal(1))
}
func TestValidatableTinkerbellCAPIControlPlaneReplicaCount(t *testing.T) {
g := gomega.NewWithT(t)
validatableCAPI := validatableTinkerbellCAPI()
g.Expect(validatableCAPI.ControlPlaneReplicaCount()).To(gomega.Equal(1))
}
func TestValidatableTinkerbellCAPIWorkerNodeGroupConfigs(t *testing.T) {
g := gomega.NewWithT(t)
validatableCAPI := validatableTinkerbellCAPI()
workerConfigs := validatableCAPI.WorkerNodeHardwareGroups()
g.Expect(workerConfigs[0].MachineDeploymentName).To(gomega.Equal("cluster-worker-node-group-0"))
g.Expect(workerConfigs[0].Replicas).To(gomega.Equal(1))
}
func TestAssertionsForScaleUpDown_Success(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
assertion := tinkerbell.AssertionsForScaleUpDown(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, true)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
}
func TestAssertionsForScaleUpDown_CAPISuccess(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
tinkerbellCAPI := validatableTinkerbellCAPI()
assertion := tinkerbell.AssertionsForScaleUpDown(catalogue, tinkerbellCAPI, false)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
check := &tinkerbell.ValidatableTinkerbellClusterSpec{newClusterSpec}
t.Log(tinkerbellCAPI.WorkerNodeHardwareGroups()[0].MachineDeploymentName)
t.Log(check.WorkerNodeHardwareGroups()[0].MachineDeploymentName)
g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
}
func TestAssertionsForScaleUpDown_ScaleUpControlPlaneSuccess(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
_ = catalogue.InsertHardware(&v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"type": "cp"},
}})
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
assertion := tinkerbell.AssertionsForScaleUpDown(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
newClusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count = 2
g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
}
func TestAssertionsForScaleUpDown_ScaleUpWorkerSuccess(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
_ = catalogue.InsertHardware(&v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"type": "worker"},
}})
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
assertion := tinkerbell.AssertionsForScaleUpDown(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(2)
g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
}
func TestAssertionsForScaleUpDown_AddWorkerSuccess(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
_ = catalogue.InsertHardware(&v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"type": "worker"},
}})
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
clusterSpec.Spec.Cluster.Spec.WorkerNodeGroupConfigurations = []eksav1alpha1.WorkerNodeGroupConfiguration{}
assertion := tinkerbell.AssertionsForScaleUpDown(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
}
func TestAssertionsForScaleUpDown_ExternalEtcdErrorFails(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
assertion := tinkerbell.AssertionsForScaleUpDown(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, true)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
g.Expect(assertion(newClusterSpec)).To(gomega.MatchError(gomega.ContainSubstring("scale up/down not supported for external etcd")))
}
func TestAssertionsForScaleUpDown_FailsScaleUpAndRollingError(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
assertion := tinkerbell.AssertionsForScaleUpDown(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, true)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
newClusterSpec.WorkerNodeGroupConfigurations()[0].Count = ptr.Int(2)
g.Expect(assertion(newClusterSpec)).NotTo(gomega.Succeed())
}
func TestHardwareSatisfiesOnlyOneSelectorAssertion_MeetsOnlyOneSelector(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
catalogue := hardware.NewCatalogue()
g.Expect(catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Labels: clusterSpec.ControlPlaneMachineConfig().Spec.HardwareSelector,
},
})).To(gomega.Succeed())
assertion := tinkerbell.HardwareSatisfiesOnlyOneSelectorAssertion(catalogue)
g.Expect(assertion(clusterSpec)).To(gomega.Succeed())
}
func TestHardwareSatisfiesOnlyOneSelectorAssertion_MeetsMultipleSelectorFails(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
// Give the external etcd selector a distinct label so the test hardware can carry labels
// that satisfy both the control plane and etcd selectors at once.
clusterSpec.ExternalEtcdMachineConfig().Spec.HardwareSelector = map[string]string{
"etcd": "etcd",
}
catalogue := hardware.NewCatalogue()
g.Expect(catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Name: "test",
Labels: mergeHardwareSelectors(
clusterSpec.ControlPlaneMachineConfig().Spec.HardwareSelector,
clusterSpec.ExternalEtcdMachineConfig().Spec.HardwareSelector,
),
},
})).To(gomega.Succeed())
assertion := tinkerbell.HardwareSatisfiesOnlyOneSelectorAssertion(catalogue)
g.Expect(assertion(clusterSpec)).ToNot(gomega.Succeed())
}
func TestHardwareSatisfiesOnlyOneSelectorAssertion_NoLabelsMeetsNothing(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
catalogue := hardware.NewCatalogue()
g.Expect(catalogue.InsertHardware(&v1alpha1.Hardware{})).To(gomega.Succeed())
assertion := tinkerbell.HardwareSatisfiesOnlyOneSelectorAssertion(catalogue)
g.Expect(assertion(clusterSpec)).To(gomega.Succeed())
}
// mergeHardwareSelectors merges m1 with m2. Values already in m1 are overwritten by values from m2.
func mergeHardwareSelectors(m1, m2 map[string]string) map[string]string {
for name, value := range m2 {
m1[name] = value
}
return m1
}
func validatableTinkerbellCAPI() *tinkerbell.ValidatableTinkerbellCAPI {
return &tinkerbell.ValidatableTinkerbellCAPI{
KubeadmControlPlane: &controlplanev1.KubeadmControlPlane{
Spec: controlplanev1.KubeadmControlPlaneSpec{
Replicas: ptr.Int32(1),
Version: "1.22",
},
},
WorkerGroups: workerGroups(),
}
}
func workerGroups() []*clusterapi.WorkerGroup[*v1beta1.TinkerbellMachineTemplate] {
return []*clusterapi.WorkerGroup[*v1beta1.TinkerbellMachineTemplate]{
{
MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) {
md.Name = "cluster-worker-node-group-0"
}),
ProviderMachineTemplate: machineTemplate(),
},
}
}
| 698 |
eks-anywhere | aws | Go | package tinkerbell
import (
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
)
// ClusterSpec represents a cluster configuration for Tinkerbell provisioning.
type ClusterSpec struct {
*cluster.Spec
// DatacenterConfig configured in the cluster configuration YAML.
DatacenterConfig *v1alpha1.TinkerbellDatacenterConfig
// MachineConfigs configured in the cluster configuration YAML, whether or not they are referenced.
MachineConfigs map[string]*v1alpha1.TinkerbellMachineConfig
}
// NewClusterSpec creates a ClusterSpec instance.
func NewClusterSpec(
clusterSpec *cluster.Spec,
machineConfigs map[string]*v1alpha1.TinkerbellMachineConfig,
datacenterConfig *v1alpha1.TinkerbellDatacenterConfig,
) *ClusterSpec {
return &ClusterSpec{
Spec: clusterSpec,
DatacenterConfig: datacenterConfig,
MachineConfigs: machineConfigs,
}
}
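// The snippet below is an illustrative sketch only (not part of this package): it shows how a
// caller that already holds a parsed *cluster.Spec plus the Tinkerbell datacenter and machine
// configs might assemble a ClusterSpec and read back the control plane machine config. The
// variable names baseSpec, machineConfigs and datacenterConfig are placeholders.
//
//	spec := NewClusterSpec(baseSpec, machineConfigs, datacenterConfig)
//	cpConfig := spec.ControlPlaneMachineConfig()
//	_ = cpConfig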
// ControlPlaneMachineConfig retrieves the TinkerbellMachineConfig referenced by the cluster
// control plane machine reference.
func (s *ClusterSpec) ControlPlaneMachineConfig() *v1alpha1.TinkerbellMachineConfig {
return s.MachineConfigs[s.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
}
// ControlPlaneConfiguration retrieves the control plane configuration from s.
func (s *ClusterSpec) ControlPlaneConfiguration() *v1alpha1.ControlPlaneConfiguration {
return &s.Cluster.Spec.ControlPlaneConfiguration
}
// HasExternalEtcd returns true if there is an external etcd configuration.
func (s *ClusterSpec) HasExternalEtcd() bool {
return s.Spec.Cluster.Spec.ExternalEtcdConfiguration != nil
}
// ExternalEtcdConfiguration returns the etcd configuration. The configuration may be nil. Consumers
// should check if external etcd configuration is present using HasExternalEtcd().
func (s *ClusterSpec) ExternalEtcdConfiguration() *v1alpha1.ExternalEtcdConfiguration {
return s.Cluster.Spec.ExternalEtcdConfiguration
}
// ExternalEtcdMachineConfig retrieves the TinkerbellMachineConfig referenced by the cluster etcd machine
// reference.
func (s *ClusterSpec) ExternalEtcdMachineConfig() *v1alpha1.TinkerbellMachineConfig {
if !s.HasExternalEtcd() {
return nil
}
return s.MachineConfigs[s.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name]
}
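// Illustrative sketch only: per the comment on ExternalEtcdConfiguration, callers are expected
// to guard on HasExternalEtcd before dereferencing the returned configuration. The spec
// variable is a placeholder.
//
//	if spec.HasExternalEtcd() {
//	    etcdCount := spec.ExternalEtcdConfiguration().Count
//	    _ = etcdCount
//	}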
// WorkerNodeGroupConfigurations retrieves all worker node group configurations in s.
func (s *ClusterSpec) WorkerNodeGroupConfigurations() []v1alpha1.WorkerNodeGroupConfiguration {
return s.Cluster.Spec.WorkerNodeGroupConfigurations
}
// WorkerNodeGroupMachineConfig retrieves the machine config associated with conf.
func (s *ClusterSpec) WorkerNodeGroupMachineConfig(conf v1alpha1.WorkerNodeGroupConfiguration) *v1alpha1.TinkerbellMachineConfig {
return s.MachineConfigs[conf.MachineGroupRef.Name]
}
// ClusterSpecAssertion makes an assertion on spec.
type ClusterSpecAssertion func(spec *ClusterSpec) error
// ClusterSpecValidator is composed of a set of ClusterSpecAssertions to be run on a ClusterSpec
// instance.
type ClusterSpecValidator []ClusterSpecAssertion
// Register registers assertions with v.
func (v *ClusterSpecValidator) Register(assertions ...ClusterSpecAssertion) {
*v = append(*v, assertions...)
}
// Validate validates spec with all assertions registered on v.
func (v *ClusterSpecValidator) Validate(spec *ClusterSpec) error {
for _, a := range *v {
if err := a(spec); err != nil {
return err
}
}
return nil
}
// NewClusterSpecValidator creates a ClusterSpecValidator instance with a set of default assertions.
// Any assertions passed will be registered in addition to the default assertions.
func NewClusterSpecValidator(assertions ...ClusterSpecAssertion) *ClusterSpecValidator {
var v ClusterSpecValidator
// Register mandatory assertions. If an assertion becomes optional depending on context, move it
// to a New* func and register it dynamically. See assert.go for examples.
v.Register(
AssertK8SVersionNot120,
AssertDatacenterConfigValid,
AssertControlPlaneMachineRefExists,
AssertEtcdMachineRefExists,
AssertWorkerNodeGroupMachineRefsExists,
AssertMachineConfigsValid,
AssertMachineConfigNamespaceMatchesDatacenterConfig,
AssertOsFamilyValid,
AssertTinkerbellIPAndControlPlaneIPNotSame,
AssertHookRetrievableWithoutProxy,
)
v.Register(assertions...)
return &v
}
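// Illustrative sketch only (not part of the provider API): it shows how a caller might extend
// the default validator with an extra assertion and then validate a spec. The requireWorkerGroups
// assertion and the spec variable are hypothetical placeholders.
//
//	requireWorkerGroups := func(spec *ClusterSpec) error {
//	    if len(spec.WorkerNodeGroupConfigurations()) == 0 {
//	        return errors.New("at least one worker node group must be configured")
//	    }
//	    return nil
//	}
//	validator := NewClusterSpecValidator(requireWorkerGroups)
//	if err := validator.Validate(spec); err != nil {
//	    // Handle the first failed assertion; Validate stops at the first error.
//	}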
| 117 |
eks-anywhere | aws | Go | package tinkerbell_test
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
type ValidClusterSpecBuilder struct {
ControlPlaneMachineName string
ExternalEtcdMachineName string
WorkerNodeGroupMachineName string
Namespace string
IncludeHardwareSelectors bool
}
func NewDefaultValidClusterSpecBuilder() ValidClusterSpecBuilder {
return ValidClusterSpecBuilder{
ControlPlaneMachineName: "control-plane",
ExternalEtcdMachineName: "external-etcd",
WorkerNodeGroupMachineName: "worker-node-group",
Namespace: "namespace",
IncludeHardwareSelectors: true,
}
}
func (b *ValidClusterSpecBuilder) WithoutHardwareSelectors() {
b.IncludeHardwareSelectors = false
}
func (b ValidClusterSpecBuilder) Build() *tinkerbell.ClusterSpec {
spec := &tinkerbell.ClusterSpec{
Spec: &cluster.Spec{
Config: &cluster.Config{
Cluster: &v1alpha1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "cluster",
},
Spec: v1alpha1.ClusterSpec{
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Count: 1,
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.TinkerbellMachineConfigKind,
Name: b.ControlPlaneMachineName,
},
Endpoint: &v1alpha1.Endpoint{
Host: "1.1.1.1",
},
},
ExternalEtcdConfiguration: &v1alpha1.ExternalEtcdConfiguration{
Count: 1,
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.TinkerbellMachineConfigKind,
Name: b.ExternalEtcdMachineName,
},
},
WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{
{
Name: "worker-node-group-0",
Count: ptr.Int(1),
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.TinkerbellMachineConfigKind,
Name: b.WorkerNodeGroupMachineName,
},
},
},
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.TinkerbellDatacenterKind,
Name: "tinkerbell-data-center",
},
},
},
TinkerbellDatacenter: b.NewDefaultTinkerbellDatacenter(),
TinkerbellMachineConfigs: b.NewDefaultTinkerbellMachineConfigs(),
},
},
DatacenterConfig: &v1alpha1.TinkerbellDatacenterConfig{
ObjectMeta: v1.ObjectMeta{
Name: "datacenter-config",
Namespace: b.Namespace,
},
Spec: v1alpha1.TinkerbellDatacenterConfigSpec{
TinkerbellIP: "1.1.1.2",
OSImageURL: "https://ubuntu.gz",
},
},
MachineConfigs: map[string]*v1alpha1.TinkerbellMachineConfig{
b.ControlPlaneMachineName: {
ObjectMeta: v1.ObjectMeta{
Name: b.ControlPlaneMachineName,
Namespace: b.Namespace,
},
Spec: v1alpha1.TinkerbellMachineConfigSpec{
HardwareSelector: v1alpha1.HardwareSelector{"type": "cp"},
OSFamily: v1alpha1.Ubuntu,
Users: []v1alpha1.UserConfiguration{
{
Name: "ec2-user",
SshAuthorizedKeys: []string{
"test-rsa ABCDEFGHIJKLMNOPQRSTUVWXYZ",
},
},
},
},
},
b.ExternalEtcdMachineName: {
ObjectMeta: v1.ObjectMeta{
Name: b.ExternalEtcdMachineName,
Namespace: b.Namespace,
},
Spec: v1alpha1.TinkerbellMachineConfigSpec{
HardwareSelector: v1alpha1.HardwareSelector{"type": "etcd"},
OSFamily: v1alpha1.Ubuntu,
Users: []v1alpha1.UserConfiguration{
{
Name: "ec2-user",
SshAuthorizedKeys: []string{
"test-rsa ABCDEFGHIJKLMNOPQRSTUVWXYZ",
},
},
},
},
},
b.WorkerNodeGroupMachineName: {
ObjectMeta: v1.ObjectMeta{
Name: b.WorkerNodeGroupMachineName,
Namespace: b.Namespace,
},
Spec: v1alpha1.TinkerbellMachineConfigSpec{
HardwareSelector: v1alpha1.HardwareSelector{"type": "worker"},
OSFamily: v1alpha1.Ubuntu,
Users: []v1alpha1.UserConfiguration{
{
Name: "ec2-user",
SshAuthorizedKeys: []string{
"test-rsa ABCDEFGHIJKLMNOPQRSTUVWXYZ",
},
},
},
},
},
},
}
if !b.IncludeHardwareSelectors {
for _, config := range spec.MachineConfigs {
config.Spec.HardwareSelector = v1alpha1.HardwareSelector{}
}
}
return spec
}
func (b ValidClusterSpecBuilder) NewDefaultTinkerbellDatacenter() *v1alpha1.TinkerbellDatacenterConfig {
return &v1alpha1.TinkerbellDatacenterConfig{
ObjectMeta: v1.ObjectMeta{
Name: "datacenter-config",
Namespace: b.Namespace,
},
Spec: v1alpha1.TinkerbellDatacenterConfigSpec{
TinkerbellIP: "1.1.1.2",
OSImageURL: "https://ubuntu.gz",
},
}
}
func (b ValidClusterSpecBuilder) NewDefaultTinkerbellMachineConfigs() map[string]*v1alpha1.TinkerbellMachineConfig {
return map[string]*v1alpha1.TinkerbellMachineConfig{
b.ControlPlaneMachineName: {
ObjectMeta: v1.ObjectMeta{
Name: b.ControlPlaneMachineName,
Namespace: b.Namespace,
},
Spec: v1alpha1.TinkerbellMachineConfigSpec{
HardwareSelector: v1alpha1.HardwareSelector{"type": "cp"},
OSFamily: v1alpha1.Ubuntu,
Users: []v1alpha1.UserConfiguration{
{
Name: "ec2-user",
},
},
},
},
b.ExternalEtcdMachineName: {
ObjectMeta: v1.ObjectMeta{
Name: b.ExternalEtcdMachineName,
Namespace: b.Namespace,
},
Spec: v1alpha1.TinkerbellMachineConfigSpec{
HardwareSelector: v1alpha1.HardwareSelector{"type": "etcd"},
OSFamily: v1alpha1.Ubuntu,
Users: []v1alpha1.UserConfiguration{
{
Name: "ec2-user",
},
},
},
},
b.WorkerNodeGroupMachineName: {
ObjectMeta: v1.ObjectMeta{
Name: b.WorkerNodeGroupMachineName,
Namespace: b.Namespace,
},
Spec: v1alpha1.TinkerbellMachineConfigSpec{
HardwareSelector: v1alpha1.HardwareSelector{"type": "worker"},
OSFamily: v1alpha1.Ubuntu,
Users: []v1alpha1.UserConfiguration{
{
Name: "ec2-user",
},
},
},
},
}
}
| 219 |
eks-anywhere | aws | Go | package tinkerbell_test
import (
"errors"
"testing"
"github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell"
)
func TestClusterSpecValidator_AssertionsWithoutError(t *testing.T) {
g := gomega.NewWithT(t)
assertions := []*MockAssertion{{}, {}, {}}
validator := tinkerbell.NewClusterSpecValidator()
for _, assertion := range assertions {
validator.Register(assertion.ClusterSpecAssertionFunc())
}
g.Expect(validator.Validate(NewDefaultValidClusterSpecBuilder().Build())).To(gomega.Succeed())
for _, assertion := range assertions {
g.Expect(assertion.Called).To(gomega.BeTrue())
}
}
func TestClusterSpecValidator_AssertionsWithError(t *testing.T) {
g := gomega.NewWithT(t)
assertions := []*MockAssertion{{}, {}, {}}
assertions[0].Return = errors.New("assertion error")
validator := tinkerbell.NewClusterSpecValidator()
for _, assertion := range assertions {
validator.Register(assertion.ClusterSpecAssertionFunc())
}
g.Expect(validator.Validate(NewDefaultValidClusterSpecBuilder().Build())).ToNot(gomega.Succeed())
g.Expect(assertions[0].Called).To(gomega.BeTrue())
g.Expect(assertions[1].Called).To(gomega.BeFalse())
g.Expect(assertions[2].Called).To(gomega.BeFalse())
}
type MockAssertion struct {
Called bool
Return error
}
func (a *MockAssertion) ClusterSpecAssertionFunc() tinkerbell.ClusterSpecAssertion {
return func(*tinkerbell.ClusterSpec) error {
a.Called = true
return a.Return
}
}
| 54 |
eks-anywhere | aws | Go | package tinkerbell
import (
"context"
"github.com/go-logr/logr"
"github.com/pkg/errors"
tinkerbellv1 "github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1"
corev1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
yamlcapi "github.com/aws/eks-anywhere/pkg/clusterapi/yaml"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
// BaseControlPlane represents a CAPI Tinkerbell control plane.
type BaseControlPlane = clusterapi.ControlPlane[*tinkerbellv1.TinkerbellCluster, *tinkerbellv1.TinkerbellMachineTemplate]
// ControlPlane holds the Tinkerbell specific objects for a CAPI Tinkerbell control plane.
type ControlPlane struct {
BaseControlPlane
Secrets *corev1.Secret
}
// Objects returns the control plane objects associated with the Tinkerbell cluster.
func (p ControlPlane) Objects() []kubernetes.Object {
o := p.BaseControlPlane.Objects()
o = append(o, p.Secrets)
return o
}
// ControlPlaneSpec builds a Tinkerbell ControlPlane definition based on an eks-a cluster spec.
func ControlPlaneSpec(ctx context.Context, logger logr.Logger, client kubernetes.Client, clusterSpec *cluster.Spec) (*ControlPlane, error) {
templateBuilder, err := generateTemplateBuilder(clusterSpec)
if err != nil {
return nil, errors.Wrap(err, "generating tinkerbell template builder")
}
controlPlaneYaml, err := templateBuilder.GenerateCAPISpecControlPlane(
clusterSpec,
func(values map[string]interface{}) {
values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(clusterSpec.Cluster)
values["etcdTemplateName"] = clusterapi.EtcdMachineTemplateName(clusterSpec.Cluster)
},
)
if err != nil {
return nil, errors.Wrap(err, "generating tinkerbell control plane yaml spec")
}
parser, builder, err := newControlPlaneParser(logger)
if err != nil {
return nil, err
}
err = parser.Parse(controlPlaneYaml, builder)
if err != nil {
return nil, errors.Wrap(err, "parsing tinkerbell control plane yaml")
}
cp := builder.ControlPlane
if err = cp.UpdateImmutableObjectNames(ctx, client, GetMachineTemplate, machineTemplateEqual); err != nil {
return nil, errors.Wrap(err, "updating tinkerbell immutable object names")
}
return cp, nil
}
// controlPlaneBuilder defines the builder for all objects in the CAPI Tinkerbell control plane.
type controlPlaneBuilder struct {
BaseBuilder *yamlcapi.ControlPlaneBuilder[*tinkerbellv1.TinkerbellCluster, *tinkerbellv1.TinkerbellMachineTemplate]
ControlPlane *ControlPlane
}
// BuildFromParsed implements the base yamlcapi.BuildFromParsed and processes any additional objects (secrets) for the Tinkerbell control plane.
func (b *controlPlaneBuilder) BuildFromParsed(lookup yamlutil.ObjectLookup) error {
if err := b.BaseBuilder.BuildFromParsed(lookup); err != nil {
return err
}
b.ControlPlane.BaseControlPlane = *b.BaseBuilder.ControlPlane
for _, obj := range lookup {
if obj.GetObjectKind().GroupVersionKind().Kind == constants.SecretKind {
b.ControlPlane.Secrets = obj.(*corev1.Secret)
}
}
return nil
}
func newControlPlaneParser(logger logr.Logger) (*yamlutil.Parser, *controlPlaneBuilder, error) {
parser, baseBuilder, err := yamlcapi.NewControlPlaneParserAndBuilder(
logger,
yamlutil.NewMapping(
"TinkerbellCluster",
func() *tinkerbellv1.TinkerbellCluster {
return &tinkerbellv1.TinkerbellCluster{}
},
),
yamlutil.NewMapping(
"TinkerbellMachineTemplate",
func() *tinkerbellv1.TinkerbellMachineTemplate {
return &tinkerbellv1.TinkerbellMachineTemplate{}
},
),
)
if err != nil {
return nil, nil, errors.Wrap(err, "building tinkerbell control plane parser")
}
err = parser.RegisterMappings(
yamlutil.NewMapping(constants.SecretKind, func() yamlutil.APIObject {
return &corev1.Secret{}
}),
)
if err != nil {
return nil, nil, errors.Wrap(err, "registering tinkerbell control plane mappings in parser")
}
builder := &controlPlaneBuilder{
BaseBuilder: baseBuilder,
ControlPlane: &ControlPlane{},
}
return parser, builder, nil
}
| 130 |
eks-anywhere | aws | Go | package tinkerbell
import (
"context"
"testing"
. "github.com/onsi/gomega"
tinkerbellv1 "github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/constants"
)
const (
testClusterConfigFilename = "testdata/cluster_tinkerbell_awsiam.yaml"
)
func TestControlPlaneObjects(t *testing.T) {
tests := []struct {
name string
controlPlane *ControlPlane
expected []kubernetes.Object
}{
{
name: "stacked etcd",
controlPlane: &ControlPlane{
BaseControlPlane: BaseControlPlane{
Cluster: capiCluster(),
ProviderCluster: tinkerbellCluster(),
KubeadmControlPlane: kubeadmControlPlane(),
ControlPlaneMachineTemplate: tinkerbellMachineTemplate("controlplane-machinetemplate"),
},
Secrets: secret(),
},
expected: []kubernetes.Object{
capiCluster(),
tinkerbellCluster(),
kubeadmControlPlane(),
tinkerbellMachineTemplate("controlplane-machinetemplate"),
secret(),
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tc.controlPlane.Objects()).To(ConsistOf(tc.expected))
})
}
}
func TestControlPlaneSpecNewCluster(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
client := test.NewFakeKubeClient()
spec := test.NewFullClusterSpec(t, testClusterConfigFilename)
cp, err := ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(kubeadmControlPlane()))
g.Expect(cp.ProviderCluster).To(Equal(tinkerbellCluster()))
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal("test-control-plane-1"))
}
func TestControlPlaneSpecNoChangesMachineTemplates(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, testClusterConfigFilename)
originalKCP := kubeadmControlPlane()
originalCPMachineTemplate := tinkerbellMachineTemplate("test-control-plane-1")
expectedKCP := originalKCP.DeepCopy()
expectedCPtemplate := originalCPMachineTemplate.DeepCopy()
client := test.NewFakeKubeClient(
originalKCP,
originalCPMachineTemplate,
)
cp, err := ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp).NotTo(BeNil())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(expectedKCP))
g.Expect(cp.ProviderCluster).To(Equal(tinkerbellCluster()))
g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(expectedCPtemplate))
}
func TestControlPlaneSpecUpdateMachineTemplates(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, testClusterConfigFilename)
originalKubeadmControlPlane := kubeadmControlPlane()
originalCPMachineTemplate := tinkerbellMachineTemplate("test-control-plane")
expectedKCP := originalKubeadmControlPlane.DeepCopy()
expectedCPTemplate := originalCPMachineTemplate.DeepCopy()
client := test.NewFakeKubeClient(
originalKubeadmControlPlane,
originalCPMachineTemplate,
)
cpTaints := []corev1.Taint{
{
Key: "foo",
Value: "bar",
Effect: "PreferNoSchedule",
},
}
spec.Cluster.Spec.ControlPlaneConfiguration.Taints = cpTaints
expectedKCP.Spec.KubeadmConfigSpec.InitConfiguration.NodeRegistration.Taints = cpTaints
expectedKCP.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.Taints = cpTaints
expectedKCP.Spec.MachineTemplate.InfrastructureRef.Name = "test-control-plane-1"
expectedCPTemplate.Name = "test-control-plane-1"
expectedCPTemplate.Spec.Template.Spec.TemplateOverride = "global_timeout: 6000\nid: \"\"\nname: tink-test\ntasks:\n- actions:\n - environment:\n COMPRESSED: \"true\"\n DEST_DISK: /dev/sda\n IMG_URL: \"\"\n image: image2disk:v1.0.0\n name: stream-image\n timeout: 360\n - environment:\n BLOCK_DEVICE: /dev/sda2\n CHROOT: \"y\"\n CMD_LINE: apt -y update && apt -y install openssl\n DEFAULT_INTERPRETER: /bin/sh -c\n FS_TYPE: ext4\n image: cexec:v1.0.0\n name: install-openssl\n timeout: 90\n - environment:\n CONTENTS: |\n network:\n version: 2\n renderer: networkd\n ethernets:\n eno1:\n dhcp4: true\n eno2:\n dhcp4: true\n eno3:\n dhcp4: true\n eno4:\n dhcp4: true\n DEST_DISK: /dev/sda2\n DEST_PATH: /etc/netplan/config.yaml\n DIRMODE: \"0755\"\n FS_TYPE: ext4\n GID: \"0\"\n MODE: \"0644\"\n UID: \"0\"\n image: writefile:v1.0.0\n name: write-netplan\n timeout: 90\n - environment:\n CONTENTS: |\n datasource:\n Ec2:\n metadata_urls: []\n strict_id: false\n system_info:\n default_user:\n name: tink\n groups: [wheel, adm]\n sudo: [\"ALL=(ALL) NOPASSWD:ALL\"]\n shell: /bin/bash\n manage_etc_hosts: localhost\n warnings:\n dsid_missing_source: off\n DEST_DISK: /dev/sda2\n DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg\n DIRMODE: \"0700\"\n FS_TYPE: ext4\n GID: \"0\"\n MODE: \"0600\"\n image: writefile:v1.0.0\n name: add-tink-cloud-init-config\n timeout: 90\n - environment:\n CONTENTS: |\n datasource: Ec2\n DEST_DISK: /dev/sda2\n DEST_PATH: /etc/cloud/ds-identify.cfg\n DIRMODE: \"0700\"\n FS_TYPE: ext4\n GID: \"0\"\n MODE: \"0600\"\n UID: \"0\"\n image: writefile:v1.0.0\n name: add-tink-cloud-init-ds-config\n timeout: 90\n - environment:\n BLOCK_DEVICE: /dev/sda2\n FS_TYPE: ext4\n image: kexec:v1.0.0\n name: kexec-image\n pid: host\n timeout: 90\n name: tink-test\n volumes:\n - /dev:/dev\n - /dev/console:/dev/console\n - /lib/firmware:/lib/firmware:ro\n worker: '{{.device_1}}'\nversion: \"0.1\"\n"
expectedCPTemplate.Spec.Template.Spec.HardwareAffinity = &tinkerbellv1.HardwareAffinity{
Required: []tinkerbellv1.HardwareAffinityTerm{
{
LabelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"type": "cp"}},
},
},
}
cp, err := ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp).NotTo(BeNil())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(expectedKCP))
g.Expect(cp.ProviderCluster).To(Equal(tinkerbellCluster()))
g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(expectedCPTemplate))
}
func TestControlPlaneSpecRegistryMirrorAuthentication(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
client := test.NewFakeKubeClient()
spec := test.NewFullClusterSpec(t, testClusterConfigFilename)
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
spec.Cluster.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{
Authenticate: true,
}
cp, err := ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(kcpWithRegistryCredentials()))
g.Expect(cp.ProviderCluster).To(Equal(tinkerbellCluster()))
g.Expect(cp.Secrets).To(Equal(secret()))
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal("test-control-plane-1"))
}
func TestControlPlaneSpecRegistryMirrorInsecureSkipVerify(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
client := test.NewFakeKubeClient()
spec := test.NewFullClusterSpec(t, testClusterConfigFilename)
tests := []struct {
name string
mirrorConfig *anywherev1.RegistryMirrorConfiguration
files []bootstrapv1.File
}{
{
name: "insecure skip verify",
mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabled(),
files: test.RegistryMirrorConfigFilesInsecureSkipVerify(),
},
{
name: "insecure skip verify with cacert",
mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabledAndCACert(),
files: test.RegistryMirrorConfigFilesInsecureSkipVerifyAndCACert(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
spec.Cluster.Spec.RegistryMirrorConfiguration = tt.mirrorConfig
cp, err := ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(kubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Spec.KubeadmConfigSpec.Files = append(kcp.Spec.KubeadmConfigSpec.Files, tt.files...)
kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands = append(kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands, test.RegistryMirrorSudoPreKubeadmCommands()...)
})))
g.Expect(cp.ProviderCluster).To(Equal(tinkerbellCluster()))
g.Expect(cp.Secrets).To(BeNil())
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal("test-control-plane-1"))
})
}
}
func tinkerbellCluster() *tinkerbellv1.TinkerbellCluster {
return &tinkerbellv1.TinkerbellCluster{
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellCluster",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: constants.EksaSystemNamespace,
},
Spec: tinkerbellv1.TinkerbellClusterSpec{
ImageLookupFormat: "--kube-v1.21.2-eks-1-21-4.raw.gz",
ImageLookupBaseRegistry: "/",
},
}
}
func kubeadmControlPlane(opts ...func(*controlplanev1.KubeadmControlPlane)) *controlplanev1.KubeadmControlPlane {
var kcp *controlplanev1.KubeadmControlPlane
b := []byte(`apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
name: test
namespace: eksa-system
spec:
kubeadmConfigSpec:
clusterConfiguration:
imageRepository: public.ecr.aws/eks-distro/kubernetes
etcd:
local:
imageRepository: public.ecr.aws/eks-distro/etcd-io
imageTag: v3.4.16-eks-1-21-4
dns:
imageRepository: public.ecr.aws/eks-distro/coredns
imageTag: v1.8.3-eks-1-21-4
apiServer:
extraArgs:
authentication-token-webhook-config-file: /etc/kubernetes/aws-iam-authenticator/kubeconfig.yaml
feature-gates: ServiceLoadBalancerClass=true
extraVolumes:
- hostPath: /var/lib/kubeadm/aws-iam-authenticator/
mountPath: /etc/kubernetes/aws-iam-authenticator/
name: authconfig
readOnly: false
- hostPath: /var/lib/kubeadm/aws-iam-authenticator/pki/
mountPath: /var/aws-iam-authenticator/
name: awsiamcert
readOnly: false
initConfiguration:
nodeRegistration:
kubeletExtraArgs:
read-only-port: 0
provider-id: PROVIDER_ID
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
anonymous-auth: false
joinConfiguration:
nodeRegistration:
ignorePreflightErrors:
- DirAvailable--etc-kubernetes-manifests
kubeletExtraArgs:
anonymous-auth: false
provider-id: PROVIDER_ID
read-only-port: 0
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
files:
- content: |
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_cidr
value: "32"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "15"
- name: vip_renewdeadline
value: "10"
- name: vip_retryperiod
value: "2"
- name: address
value: 1.2.3.4
image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.581
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostNetwork: true
volumes:
- hostPath:
path: /etc/kubernetes/admin.conf
name: kubeconfig
status: {}
owner: root:root
path: /etc/kubernetes/manifests/kube-vip.yaml
- content: |
# clusters refers to the remote service.
clusters:
- name: aws-iam-authenticator
cluster:
certificate-authority: /var/aws-iam-authenticator/cert.pem
server: https://localhost:21362/authenticate
# users refers to the API Server's webhook configuration
# (we don't need to authenticate the API server).
users:
- name: apiserver
# kubeconfig files require a context. Provide one for the API Server.
current-context: webhook
contexts:
- name: webhook
context:
cluster: aws-iam-authenticator
user: apiserver
permissions: "0640"
owner: root:root
path: /var/lib/kubeadm/aws-iam-authenticator/kubeconfig.yaml
- contentFrom:
secret:
name: test-aws-iam-authenticator-ca
key: cert.pem
permissions: "0640"
owner: root:root
path: /var/lib/kubeadm/aws-iam-authenticator/pki/cert.pem
- contentFrom:
secret:
name: test-aws-iam-authenticator-ca
key: key.pem
permissions: "0640"
owner: root:root
path: /var/lib/kubeadm/aws-iam-authenticator/pki/key.pem
users:
- name: tink-user
sshAuthorizedKeys:
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=='
sudo: ALL=(ALL) NOPASSWD:ALL
format: cloud-config
rolloutStrategy:
rollingUpdate:
maxSurge: 1
machineTemplate:
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: TinkerbellMachineTemplate
name: test-control-plane-1
replicas: 1
version: v1.21.2-eks-1-21-4`)
if err := yaml.UnmarshalStrict(b, &kcp); err != nil {
return nil
}
for _, opt := range opts {
opt(kcp)
}
return kcp
}
func kcpWithRegistryCredentials() *controlplanev1.KubeadmControlPlane {
var kcp *controlplanev1.KubeadmControlPlane
b := []byte(`apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
creationTimestamp: null
name: test
namespace: eksa-system
spec:
kubeadmConfigSpec:
clusterConfiguration:
apiServer:
extraArgs:
authentication-token-webhook-config-file: /etc/kubernetes/aws-iam-authenticator/kubeconfig.yaml
feature-gates: ServiceLoadBalancerClass=true
extraVolumes:
- hostPath: /var/lib/kubeadm/aws-iam-authenticator/
mountPath: /etc/kubernetes/aws-iam-authenticator/
name: authconfig
- hostPath: /var/lib/kubeadm/aws-iam-authenticator/pki/
mountPath: /var/aws-iam-authenticator/
name: awsiamcert
bottlerocketAdmin: {}
bottlerocketBootstrap: {}
bottlerocketControl: {}
controllerManager: {}
dns:
imageRepository: public.ecr.aws/eks-distro/coredns
imageTag: v1.8.3-eks-1-21-4
etcd:
local:
imageRepository: public.ecr.aws/eks-distro/etcd-io
imageTag: v3.4.16-eks-1-21-4
imageRepository: public.ecr.aws/eks-distro/kubernetes
networking: {}
pause: {}
proxy: {}
registryMirror: {}
scheduler: {}
files:
- content: |
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_cidr
value: "32"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "15"
- name: vip_renewdeadline
value: "10"
- name: vip_retryperiod
value: "2"
- name: address
value: 1.2.3.4
image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.581
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostNetwork: true
volumes:
- hostPath:
path: /etc/kubernetes/admin.conf
name: kubeconfig
status: {}
owner: root:root
path: /etc/kubernetes/manifests/kube-vip.yaml
- content: |
# clusters refers to the remote service.
clusters:
- name: aws-iam-authenticator
cluster:
certificate-authority: /var/aws-iam-authenticator/cert.pem
server: https://localhost:21362/authenticate
# users refers to the API Server's webhook configuration
# (we don't need to authenticate the API server).
users:
- name: apiserver
# kubeconfig files require a context. Provide one for the API Server.
current-context: webhook
contexts:
- name: webhook
context:
cluster: aws-iam-authenticator
user: apiserver
owner: root:root
path: /var/lib/kubeadm/aws-iam-authenticator/kubeconfig.yaml
permissions: "0640"
- contentFrom:
secret:
key: cert.pem
name: test-aws-iam-authenticator-ca
owner: root:root
path: /var/lib/kubeadm/aws-iam-authenticator/pki/cert.pem
permissions: "0640"
- contentFrom:
secret:
key: key.pem
name: test-aws-iam-authenticator-ca
owner: root:root
path: /var/lib/kubeadm/aws-iam-authenticator/pki/key.pem
permissions: "0640"
- content: |
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."public.ecr.aws"]
endpoint = ["https://:"]
[plugins."io.containerd.grpc.v1.cri".registry.configs.":".auth]
username = "username"
password = "password"
owner: root:root
path: /etc/containerd/config_append.toml
format: cloud-config
initConfiguration:
localAPIEndpoint: {}
nodeRegistration:
kubeletExtraArgs:
anonymous-auth: "false"
provider-id: PROVIDER_ID
read-only-port: "0"
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
joinConfiguration:
bottlerocketAdmin: {}
bottlerocketBootstrap: {}
bottlerocketControl: {}
discovery: {}
nodeRegistration:
ignorePreflightErrors:
- DirAvailable--etc-kubernetes-manifests
kubeletExtraArgs:
anonymous-auth: "false"
provider-id: PROVIDER_ID
read-only-port: "0"
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
pause: {}
proxy: {}
registryMirror: {}
preKubeadmCommands:
- cat /etc/containerd/config_append.toml >> /etc/containerd/config.toml
- sudo systemctl daemon-reload
- sudo systemctl restart containerd
users:
- name: tink-user
sshAuthorizedKeys:
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=='
sudo: ALL=(ALL) NOPASSWD:ALL
machineTemplate:
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: TinkerbellMachineTemplate
name: test-control-plane-1
metadata: {}
replicas: 1
rolloutStrategy:
rollingUpdate:
maxSurge: 1
version: v1.21.2-eks-1-21-4
status:
initialized: false
ready: false
readyReplicas: 0
replicas: 0
unavailableReplicas: 0
updatedReplicas: 0
`)
if err := yaml.UnmarshalStrict(b, &kcp); err != nil {
return nil
}
return kcp
}
func tinkerbellMachineTemplate(name string) *tinkerbellv1.TinkerbellMachineTemplate {
return &tinkerbellv1.TinkerbellMachineTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellMachineTemplate",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: constants.EksaSystemNamespace,
},
Spec: tinkerbellv1.TinkerbellMachineTemplateSpec{
Template: tinkerbellv1.TinkerbellMachineTemplateResource{
Spec: tinkerbellv1.TinkerbellMachineSpec{
TemplateOverride: "global_timeout: 6000\nid: \"\"\nname: tink-test\ntasks:\n- actions:\n - environment:\n COMPRESSED: \"true\"\n DEST_DISK: /dev/sda\n IMG_URL: \"\"\n image: image2disk:v1.0.0\n name: stream-image\n timeout: 360\n - environment:\n BLOCK_DEVICE: /dev/sda2\n CHROOT: \"y\"\n CMD_LINE: apt -y update && apt -y install openssl\n DEFAULT_INTERPRETER: /bin/sh -c\n FS_TYPE: ext4\n image: cexec:v1.0.0\n name: install-openssl\n timeout: 90\n - environment:\n CONTENTS: |\n network:\n version: 2\n renderer: networkd\n ethernets:\n eno1:\n dhcp4: true\n eno2:\n dhcp4: true\n eno3:\n dhcp4: true\n eno4:\n dhcp4: true\n DEST_DISK: /dev/sda2\n DEST_PATH: /etc/netplan/config.yaml\n DIRMODE: \"0755\"\n FS_TYPE: ext4\n GID: \"0\"\n MODE: \"0644\"\n UID: \"0\"\n image: writefile:v1.0.0\n name: write-netplan\n timeout: 90\n - environment:\n CONTENTS: |\n datasource:\n Ec2:\n metadata_urls: []\n strict_id: false\n system_info:\n default_user:\n name: tink\n groups: [wheel, adm]\n sudo: [\"ALL=(ALL) NOPASSWD:ALL\"]\n shell: /bin/bash\n manage_etc_hosts: localhost\n warnings:\n dsid_missing_source: off\n DEST_DISK: /dev/sda2\n DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg\n DIRMODE: \"0700\"\n FS_TYPE: ext4\n GID: \"0\"\n MODE: \"0600\"\n image: writefile:v1.0.0\n name: add-tink-cloud-init-config\n timeout: 90\n - environment:\n CONTENTS: |\n datasource: Ec2\n DEST_DISK: /dev/sda2\n DEST_PATH: /etc/cloud/ds-identify.cfg\n DIRMODE: \"0700\"\n FS_TYPE: ext4\n GID: \"0\"\n MODE: \"0600\"\n UID: \"0\"\n image: writefile:v1.0.0\n name: add-tink-cloud-init-ds-config\n timeout: 90\n - environment:\n BLOCK_DEVICE: /dev/sda2\n FS_TYPE: ext4\n image: kexec:v1.0.0\n name: kexec-image\n pid: host\n timeout: 90\n name: tink-test\n volumes:\n - /dev:/dev\n - /dev/console:/dev/console\n - /lib/firmware:/lib/firmware:ro\n worker: '{{.device_1}}'\nversion: \"0.1\"\n",
HardwareAffinity: &tinkerbellv1.HardwareAffinity{
Required: []tinkerbellv1.HardwareAffinityTerm{
{
LabelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"type": "cp"}},
},
},
},
},
},
},
}
}
func capiCluster() *clusterv1.Cluster {
return &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: constants.EksaSystemNamespace,
Labels: map[string]string{"cluster.x-k8s.io/cluster-name": "test"},
},
Spec: clusterv1.ClusterSpec{
ClusterNetwork: &clusterv1.ClusterNetwork{
APIServerPort: nil,
Services: &clusterv1.NetworkRanges{
CIDRBlocks: []string{"10.96.0.0/12"},
},
Pods: &clusterv1.NetworkRanges{
CIDRBlocks: []string{"192.168.0.0/16"},
},
},
ControlPlaneEndpoint: clusterv1.APIEndpoint{
Host: "1.2.3.4",
Port: 6443,
},
ControlPlaneRef: &corev1.ObjectReference{
Kind: "KubeadmControlPlane",
Name: "test",
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
},
InfrastructureRef: &corev1.ObjectReference{
Kind: "TinkerbellCluster",
Name: "test",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
},
}
}
func secret() *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: "eksa-system",
Name: "registry-credentials",
Labels: map[string]string{
"clusterctl.cluster.x-k8s.io/move": "true",
},
},
StringData: map[string]string{
"username": "username",
"password": "password",
},
}
}
| 668 |
eks-anywhere | aws | Go | package tinkerbell
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers/common"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack"
"github.com/aws/eks-anywhere/pkg/types"
)
func (p *Provider) BootstrapClusterOpts(_ *cluster.Spec) ([]bootstrapper.BootstrapClusterOption, error) {
opts, err := common.BootstrapClusterOpts(p.clusterConfig, p.datacenterConfig.Spec.TinkerbellIP)
if err != nil {
return nil, err
}
opts = append(opts, bootstrapper.WithExtraPortMappings(tinkerbellStackPorts))
return opts, nil
}
func (p *Provider) PreCAPIInstallOnBootstrap(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
logger.V(4).Info("Installing Tinkerbell stack on bootstrap cluster")
err := p.stackInstaller.Install(
ctx,
clusterSpec.VersionsBundle.Tinkerbell,
p.tinkerbellIP,
cluster.KubeconfigFile,
p.datacenterConfig.Spec.HookImagesURLPath,
stack.WithBootsOnDocker(),
stack.WithHostPortEnabled(true), // enable host port on bootstrap cluster
)
if err != nil {
return fmt.Errorf("install Tinkerbell stack on bootstrap cluster: %v", err)
}
return nil
}
func (p *Provider) PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
return p.applyHardware(ctx, cluster)
}
// applyHardware applies all the hardware in the catalogue to the cluster and, when BMCs are present, waits for the Rufio machines to become contactable.
func (p *Provider) applyHardware(ctx context.Context, cluster *types.Cluster) error {
hardwareSpec, err := hardware.MarshalCatalogue(p.catalogue)
if err != nil {
return fmt.Errorf("failed marshalling resources for hardware spec: %v", err)
}
err = p.providerKubectlClient.ApplyKubeSpecFromBytesForce(ctx, cluster, hardwareSpec)
if err != nil {
return fmt.Errorf("applying hardware yaml: %v", err)
}
if len(p.catalogue.AllBMCs()) > 0 {
err = p.providerKubectlClient.WaitForRufioMachines(ctx, cluster, "5m", "Contactable", constants.EksaSystemNamespace)
if err != nil {
return fmt.Errorf("waiting for baseboard management to be contactable: %v", err)
}
}
return nil
}
func (p *Provider) PostWorkloadInit(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
logger.V(4).Info("Installing Tinkerbell stack on workload cluster")
if p.datacenterConfig.Spec.SkipLoadBalancerDeployment {
logger.Info("Warning: Skipping load balancer deployment. Please install and configure a load balancer once the cluster is created.")
}
err := p.stackInstaller.Install(
ctx,
clusterSpec.VersionsBundle.Tinkerbell,
p.templateBuilder.datacenterSpec.TinkerbellIP,
cluster.KubeconfigFile,
p.datacenterConfig.Spec.HookImagesURLPath,
stack.WithBootsOnKubernetes(),
stack.WithHostPortEnabled(false), // disable host port on workload cluster
stack.WithEnvoyEnabled(true), // use envoy on workload cluster
stack.WithLoadBalancerEnabled(
len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations) != 0 && // load balancer is handled by kube-vip in control plane nodes
!p.datacenterConfig.Spec.SkipLoadBalancerDeployment), // configure load balancer based on datacenterConfig.Spec.SkipLoadBalancerDeployment
)
if err != nil {
return fmt.Errorf("installing stack on workload cluster: %v", err)
}
if err := p.stackInstaller.UninstallLocal(ctx); err != nil {
return err
}
return nil
}
func (p *Provider) SetupAndValidateCreateCluster(ctx context.Context, clusterSpec *cluster.Spec) error {
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
return errExternalEtcdUnsupported
}
if err := p.stackInstaller.CleanupLocalBoots(ctx, p.forceCleanup); err != nil {
return err
}
// TODO(chrisdoherty4) Extract to a defaulting construct and add associated validations to ensure
// there is always a user with ssh key configured.
if err := p.configureSshKeys(); err != nil {
return err
}
if p.hardwareCSVIsProvided() {
if err := p.readCSVToCatalogue(); err != nil {
return err
}
}
spec := NewClusterSpec(clusterSpec, p.machineConfigs, p.datacenterConfig)
if p.clusterConfig.IsManaged() {
for _, mc := range p.MachineConfigs(clusterSpec) {
em, err := p.providerKubectlClient.SearchTinkerbellMachineConfig(ctx, mc.GetName(), clusterSpec.ManagementCluster.KubeconfigFile, mc.GetNamespace())
if err != nil {
return err
}
if len(em) > 0 {
return fmt.Errorf("TinkerbellMachineConfig %s already exists", mc.GetName())
}
}
existingDatacenter, err := p.providerKubectlClient.SearchTinkerbellDatacenterConfig(ctx, p.datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace)
if err != nil {
return err
}
if len(existingDatacenter) > 0 {
return fmt.Errorf("TinkerbellDatacenterConfig %s already exists", p.datacenterConfig.Name)
}
if err := p.getHardwareFromManagementCluster(ctx, clusterSpec.ManagementCluster); err != nil {
return err
}
// For workload clusters, use the Tinkerbell IP of the management cluster.
managementClusterSpec, err := p.providerKubectlClient.GetEksaCluster(ctx, clusterSpec.ManagementCluster, clusterSpec.ManagementCluster.Name)
if err != nil {
return err
}
managementDatacenterConfig, err := p.providerKubectlClient.GetEksaTinkerbellDatacenterConfig(ctx, managementClusterSpec.Spec.DatacenterRef.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace)
if err != nil {
return fmt.Errorf("getting TinkerbellIP of management cluster: %s", err)
}
// Checking for empty first as that returns a different error in the datacenter config validate method below
if p.datacenterConfig.Spec.TinkerbellIP != "" && p.datacenterConfig.Spec.TinkerbellIP != managementDatacenterConfig.Spec.TinkerbellIP {
return fmt.Errorf("TinkerbellIP %v does not match management cluster ip %v", p.datacenterConfig.Spec.TinkerbellIP, managementDatacenterConfig.Spec.TinkerbellIP)
}
}
// TODO(chrisdoherty4) Look to inject the validator. Possibly look to use a builder for
// constructing the validations rather than injecting flags into the provider.
clusterSpecValidator := NewClusterSpecValidator(
MinimumHardwareAvailableAssertionForCreate(p.catalogue),
HardwareSatisfiesOnlyOneSelectorAssertion(p.catalogue),
)
clusterSpecValidator.Register(AssertPortsNotInUse(p.netClient))
if !p.skipIpCheck {
clusterSpecValidator.Register(NewIPNotInUseAssertion(p.netClient))
if !p.clusterConfig.IsManaged() {
clusterSpecValidator.Register(AssertTinkerbellIPNotInUse(p.netClient))
}
}
// Validate must happen last because we depend on the catalogue entries for some checks.
if err := clusterSpecValidator.Validate(spec); err != nil {
return err
}
if p.clusterConfig.IsManaged() {
return p.applyHardware(ctx, clusterSpec.ManagementCluster)
}
return nil
}
func (p *Provider) getHardwareFromManagementCluster(ctx context.Context, cluster *types.Cluster) error {
// Retrieve all unprovisioned hardware from the management cluster and populate the catalogue so
// it can be considered for workload cluster creation.
hardware, err := p.providerKubectlClient.GetUnprovisionedTinkerbellHardware(
ctx,
cluster.KubeconfigFile,
constants.EksaSystemNamespace,
)
if err != nil {
return fmt.Errorf("retrieving unprovisioned hardware: %v", err)
}
for i := range hardware {
if err := p.catalogue.InsertHardware(&hardware[i]); err != nil {
return err
}
}
// Retrieve all provisioned hardware from the management cluster so it can be removed from the
// catalogue below; hardware that is already provisioned must not be reused for the workload cluster.
hardware, err = p.providerKubectlClient.GetProvisionedTinkerbellHardware(
ctx,
cluster.KubeconfigFile,
constants.EksaSystemNamespace,
)
if err != nil {
return fmt.Errorf("retrieving provisioned hardware: %v", err)
}
// Remove any already-provisioned hardware from the catalogue in case it was repeated in the hardware CSV input.
if err := p.catalogue.RemoveHardwares(hardware); err != nil {
return err
}
return nil
}
func (p *Provider) readCSVToCatalogue() error {
// Create a catalogue writer used to write hardware to the catalogue.
catalogueWriter := hardware.NewMachineCatalogueWriter(p.catalogue)
machineValidator := hardware.NewDefaultMachineValidator()
// Translate all Machine instances from the hardware CSV into Kubernetes object types.
// The PostBootstrapSetup() call invoked elsewhere in the program serializes the catalogue
// and submits it to the cluster.
machines, err := hardware.NewNormalizedCSVReaderFromFile(p.hardwareCSVFile)
if err != nil {
return err
}
return hardware.TranslateAll(machines, catalogueWriter, machineValidator)
}
| 239 |
eks-anywhere | aws | Go | package tinkerbell
import (
"context"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/types"
)
func (p *Provider) SetupAndValidateDeleteCluster(ctx context.Context, cluster *types.Cluster, _ *cluster.Spec) error {
// noop
return nil
}
func (p *Provider) DeleteResources(ctx context.Context, clusterSpec *cluster.Spec) error {
for _, mc := range p.machineConfigs {
if err := p.providerKubectlClient.DeleteEksaMachineConfig(ctx, eksaTinkerbellMachineResourceType, mc.Name, clusterSpec.ManagementCluster.KubeconfigFile, mc.Namespace); err != nil {
return err
}
}
return p.providerKubectlClient.DeleteEksaDatacenterConfig(ctx, eksaTinkerbellDatacenterResourceType, p.datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, p.datacenterConfig.Namespace)
}
func (p *Provider) PostClusterDeleteValidate(ctx context.Context, managementCluster *types.Cluster) error {
if err := p.stackInstaller.UninstallLocal(ctx); err != nil {
return err
}
return nil
}
| 31 |
eks-anywhere | aws | Go | package tinkerbell
import (
"context"
"github.com/pkg/errors"
tinkerbellv1 "github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1"
"k8s.io/apimachinery/pkg/api/equality"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
)
// GetMachineTemplate gets a TinkerbellMachineTemplate object using the provided client.
// If the object doesn't exist, it returns a NotFound error.
func GetMachineTemplate(ctx context.Context, client kubernetes.Client, name, namespace string) (*tinkerbellv1.TinkerbellMachineTemplate, error) {
m := &tinkerbellv1.TinkerbellMachineTemplate{}
if err := client.Get(ctx, name, namespace, m); err != nil {
return nil, errors.Wrap(err, "reading tinkerbellMachineTemplate")
}
return m, nil
}
// machineTemplateEqual returns a boolean indicating whether the provided TinkerbellMachineTemplates are equal.
func machineTemplateEqual(new, old *tinkerbellv1.TinkerbellMachineTemplate) bool {
return equality.Semantic.DeepDerivative(new.Spec, old.Spec)
}
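// exampleMachineTemplateDerivativeEquality is a minimal illustrative sketch (not part of the
// provider flow) of the DeepDerivative semantics behind machineTemplateEqual: fields left empty
// on the desired template are ignored, so a template that only sets TemplateOverride still
// compares equal to an existing template that additionally carries a HardwareAffinity.
func exampleMachineTemplateDerivativeEquality() bool {
	existing := &tinkerbellv1.TinkerbellMachineTemplate{
		Spec: tinkerbellv1.TinkerbellMachineTemplateSpec{
			Template: tinkerbellv1.TinkerbellMachineTemplateResource{
				Spec: tinkerbellv1.TinkerbellMachineSpec{
					TemplateOverride: "version: \"0.1\"",
					HardwareAffinity: &tinkerbellv1.HardwareAffinity{},
				},
			},
		},
	}
	desired := &tinkerbellv1.TinkerbellMachineTemplate{
		Spec: tinkerbellv1.TinkerbellMachineTemplateSpec{
			Template: tinkerbellv1.TinkerbellMachineTemplateResource{
				Spec: tinkerbellv1.TinkerbellMachineSpec{
					TemplateOverride: "version: \"0.1\"",
				},
			},
		},
	}
	// Returns true: the unset HardwareAffinity on the desired template is treated as "unspecified".
	return machineTemplateEqual(desired, existing)
}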
| 28 |
eks-anywhere | aws | Go | package tinkerbell
import (
"fmt"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/providers/common"
)
const defaultUsername = "ec2-user"
func ensureMachineConfigsHaveAtLeast1User(machines map[string]*v1alpha1.TinkerbellMachineConfig) {
for _, machine := range machines {
if len(machine.Spec.Users) == 0 {
machine.Spec.Users = []v1alpha1.UserConfiguration{{Name: defaultUsername}}
}
}
}
func extractUserConfigurationsWithoutSshKeys(machines map[string]*v1alpha1.TinkerbellMachineConfig) []*v1alpha1.UserConfiguration {
var users []*v1alpha1.UserConfiguration
for _, machine := range machines {
if len(machine.Spec.Users[0].SshAuthorizedKeys) == 0 || len(machine.Spec.Users[0].SshAuthorizedKeys[0]) == 0 {
users = append(users, &machine.Spec.Users[0])
}
}
return users
}
func applySshKeyToUsers(users []*v1alpha1.UserConfiguration, key string) {
for _, user := range users {
if len(user.SshAuthorizedKeys) == 0 {
user.SshAuthorizedKeys = make([]string, 1)
}
user.SshAuthorizedKeys[0] = key
}
}
func stripCommentsFromSshKeys(machines map[string]*v1alpha1.TinkerbellMachineConfig) error {
for _, machine := range machines {
key, err := common.StripSshAuthorizedKeyComment(machine.Spec.Users[0].SshAuthorizedKeys[0])
if err != nil {
return fmt.Errorf("TinkerbellMachineConfig name=%v: %v", machine.Name, err)
}
machine.Spec.Users[0].SshAuthorizedKeys[0] = key
}
return nil
}
func (p *Provider) configureSshKeys() error {
ensureMachineConfigsHaveAtLeast1User(p.machineConfigs)
users := extractUserConfigurationsWithoutSshKeys(p.machineConfigs)
if len(users) > 0 {
publicAuthorizedKey, err := p.keyGenerator.GenerateSSHAuthKey(p.writer)
if err != nil {
return err
}
applySshKeyToUsers(users, publicAuthorizedKey)
}
if err := stripCommentsFromSshKeys(p.machineConfigs); err != nil {
return fmt.Errorf("stripping ssh key comment: %v", err)
}
return nil
}
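// exampleConfigureSSHKeysByHand is an illustrative sketch of how the helpers above compose,
// assuming a pre-generated public key instead of the provider's keyGenerator (hypothetical input,
// not part of the provider flow): machine configs with no users get the default user, users
// without a key receive the shared key, and trailing key comments are stripped so the key can be
// embedded verbatim in CAPI templates.
func exampleConfigureSSHKeysByHand(machines map[string]*v1alpha1.TinkerbellMachineConfig, generatedKey string) error {
	ensureMachineConfigsHaveAtLeast1User(machines)
	if users := extractUserConfigurationsWithoutSshKeys(machines); len(users) > 0 {
		applySshKeyToUsers(users, generatedKey)
	}
	return stripCommentsFromSshKeys(machines)
}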
| 73 |
eks-anywhere | aws | Go | package tinkerbell
import (
"bufio"
"bytes"
"context"
_ "embed"
"fmt"
"io"
"strings"
"time"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
yamlutil "k8s.io/apimachinery/pkg/util/yaml"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/crypto"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/providers/common"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/registrymirror/containerd"
"github.com/aws/eks-anywhere/pkg/templater"
"github.com/aws/eks-anywhere/pkg/types"
unstructuredutil "github.com/aws/eks-anywhere/pkg/utils/unstructured"
)
//go:embed config/template-cp.yaml
var defaultCAPIConfigCP string
//go:embed config/template-md.yaml
var defaultClusterConfigMD string
const (
TinkerbellMachineTemplateKind = "TinkerbellMachineTemplate"
defaultRegistry = "public.ecr.aws"
)
type TemplateBuilder struct {
controlPlaneMachineSpec *v1alpha1.TinkerbellMachineConfigSpec
datacenterSpec *v1alpha1.TinkerbellDatacenterConfigSpec
WorkerNodeGroupMachineSpecs map[string]v1alpha1.TinkerbellMachineConfigSpec
etcdMachineSpec *v1alpha1.TinkerbellMachineConfigSpec
tinkerbellIP string
now types.NowFunc
}
// NewTemplateBuilder creates a new TemplateBuilder instance.
func NewTemplateBuilder(datacenterSpec *v1alpha1.TinkerbellDatacenterConfigSpec, controlPlaneMachineSpec, etcdMachineSpec *v1alpha1.TinkerbellMachineConfigSpec, workerNodeGroupMachineSpecs map[string]v1alpha1.TinkerbellMachineConfigSpec, tinkerbellIP string, now types.NowFunc) providers.TemplateBuilder {
return &TemplateBuilder{
controlPlaneMachineSpec: controlPlaneMachineSpec,
datacenterSpec: datacenterSpec,
WorkerNodeGroupMachineSpecs: workerNodeGroupMachineSpecs,
etcdMachineSpec: etcdMachineSpec,
tinkerbellIP: tinkerbellIP,
now: now,
}
}
func (tb *TemplateBuilder) GenerateCAPISpecControlPlane(clusterSpec *cluster.Spec, buildOptions ...providers.BuildMapOption) (content []byte, err error) {
cpTemplateConfig := clusterSpec.TinkerbellTemplateConfigs[tb.controlPlaneMachineSpec.TemplateRef.Name]
if cpTemplateConfig == nil {
versionBundle := clusterSpec.VersionsBundle.VersionsBundle
cpTemplateConfig = v1alpha1.NewDefaultTinkerbellTemplateConfigCreate(clusterSpec.Cluster, *versionBundle, tb.datacenterSpec.OSImageURL, tb.tinkerbellIP, tb.datacenterSpec.TinkerbellIP, tb.controlPlaneMachineSpec.OSFamily)
}
cpTemplateString, err := cpTemplateConfig.ToTemplateString()
if err != nil {
return nil, fmt.Errorf("failed to get Control Plane TinkerbellTemplateConfig: %v", err)
}
var etcdMachineSpec v1alpha1.TinkerbellMachineConfigSpec
var etcdTemplateString string
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
etcdMachineSpec = *tb.etcdMachineSpec
etcdTemplateConfig := clusterSpec.TinkerbellTemplateConfigs[tb.etcdMachineSpec.TemplateRef.Name]
if etcdTemplateConfig == nil {
versionBundle := clusterSpec.VersionsBundle.VersionsBundle
etcdTemplateConfig = v1alpha1.NewDefaultTinkerbellTemplateConfigCreate(clusterSpec.Cluster, *versionBundle, tb.datacenterSpec.OSImageURL, tb.tinkerbellIP, tb.datacenterSpec.TinkerbellIP, tb.etcdMachineSpec.OSFamily)
}
etcdTemplateString, err = etcdTemplateConfig.ToTemplateString()
if err != nil {
return nil, fmt.Errorf("failed to get ETCD TinkerbellTemplateConfig: %v", err)
}
}
values, err := buildTemplateMapCP(clusterSpec, *tb.controlPlaneMachineSpec, etcdMachineSpec, cpTemplateString, etcdTemplateString, *tb.datacenterSpec)
if err != nil {
return nil, err
}
for _, buildOption := range buildOptions {
buildOption(values)
}
bytes, err := templater.Execute(defaultCAPIConfigCP, values)
if err != nil {
return nil, err
}
return bytes, nil
}
func (tb *TemplateBuilder) GenerateCAPISpecWorkers(clusterSpec *cluster.Spec, workloadTemplateNames, kubeadmconfigTemplateNames map[string]string) (content []byte, err error) {
workerSpecs := make([][]byte, 0, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
workerNodeMachineSpec := tb.WorkerNodeGroupMachineSpecs[workerNodeGroupConfiguration.MachineGroupRef.Name]
wTemplateConfig := clusterSpec.TinkerbellTemplateConfigs[workerNodeMachineSpec.TemplateRef.Name]
if wTemplateConfig == nil {
versionBundle := clusterSpec.VersionsBundle.VersionsBundle
wTemplateConfig = v1alpha1.NewDefaultTinkerbellTemplateConfigCreate(clusterSpec.Cluster, *versionBundle, tb.datacenterSpec.OSImageURL, tb.tinkerbellIP, tb.datacenterSpec.TinkerbellIP, workerNodeMachineSpec.OSFamily)
}
wTemplateString, err := wTemplateConfig.ToTemplateString()
if err != nil {
return nil, fmt.Errorf("failed to get worker TinkerbellTemplateConfig: %v", err)
}
values, err := buildTemplateMapMD(clusterSpec, tb.WorkerNodeGroupMachineSpecs[workerNodeGroupConfiguration.MachineGroupRef.Name], workerNodeGroupConfiguration, wTemplateString, *tb.datacenterSpec)
if err != nil {
return nil, err
}
_, ok := workloadTemplateNames[workerNodeGroupConfiguration.Name]
if workloadTemplateNames == nil || !ok {
return nil, fmt.Errorf("workloadTemplateNames invalid in GenerateCAPISpecWorkers: %v", err)
}
_, ok = kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name]
if kubeadmconfigTemplateNames == nil || !ok {
return nil, fmt.Errorf("kubeadmconfigTemplateNames invalid in GenerateCAPISpecWorkers: %v", err)
}
values["workerSshAuthorizedKey"] = tb.WorkerNodeGroupMachineSpecs[workerNodeGroupConfiguration.MachineGroupRef.Name].Users[0].SshAuthorizedKeys[0]
values["workerReplicas"] = *workerNodeGroupConfiguration.Count
values["workloadTemplateName"] = workloadTemplateNames[workerNodeGroupConfiguration.Name]
values["workerNodeGroupName"] = workerNodeGroupConfiguration.Name
values["workloadkubeadmconfigTemplateName"] = kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name]
values["autoscalingConfig"] = workerNodeGroupConfiguration.AutoScalingConfiguration
if workerNodeGroupConfiguration.UpgradeRolloutStrategy != nil {
values["upgradeRolloutStrategy"] = true
values["maxSurge"] = workerNodeGroupConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge
values["maxUnavailable"] = workerNodeGroupConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxUnavailable
}
bytes, err := templater.Execute(defaultClusterConfigMD, values)
if err != nil {
return nil, err
}
workerSpecs = append(workerSpecs, bytes)
}
return templater.AppendYamlResources(workerSpecs...), nil
}
func (p *Provider) generateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, workloadCluster *types.Cluster, currentSpec, newClusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
clusterName := newClusterSpec.Cluster.Name
var controlPlaneTemplateName, workloadTemplateName, kubeadmconfigTemplateName, etcdTemplateName string
var needsNewEtcdTemplate bool
c, err := p.providerKubectlClient.GetEksaCluster(ctx, workloadCluster, newClusterSpec.Cluster.Name)
if err != nil {
return nil, nil, err
}
vdc, err := p.providerKubectlClient.GetEksaTinkerbellDatacenterConfig(ctx, p.datacenterConfig.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
if err != nil {
return nil, nil, err
}
needsNewControlPlaneTemplate := needsNewControlPlaneTemplate(currentSpec, newClusterSpec)
if !needsNewControlPlaneTemplate {
cp, err := p.providerKubectlClient.GetKubeadmControlPlane(ctx, workloadCluster, c.Name, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, nil, err
}
controlPlaneTemplateName = cp.Spec.MachineTemplate.InfrastructureRef.Name
} else {
controlPlaneTemplateName = common.CPMachineTemplateName(clusterName, p.templateBuilder.now)
}
previousWorkerNodeGroupConfigs := cluster.BuildMapForWorkerNodeGroupsByName(currentSpec.Cluster.Spec.WorkerNodeGroupConfigurations)
workloadTemplateNames := make(map[string]string, len(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
kubeadmconfigTemplateNames := make(map[string]string, len(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
for _, workerNodeGroupConfiguration := range newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
needsNewWorkloadTemplate, err := p.needsNewMachineTemplate(ctx, workloadCluster, currentSpec, newClusterSpec, workerNodeGroupConfiguration, vdc, previousWorkerNodeGroupConfigs)
if err != nil {
return nil, nil, err
}
needsNewKubeadmConfigTemplate, err := p.needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration, previousWorkerNodeGroupConfigs)
if err != nil {
return nil, nil, err
}
if !needsNewKubeadmConfigTemplate {
mdName := machineDeploymentName(newClusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name)
md, err := p.providerKubectlClient.GetMachineDeployment(ctx, mdName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, nil, err
}
kubeadmconfigTemplateName = md.Spec.Template.Spec.Bootstrap.ConfigRef.Name
kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = kubeadmconfigTemplateName
} else {
kubeadmconfigTemplateName = common.KubeadmConfigTemplateName(clusterName, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = kubeadmconfigTemplateName
}
if !needsNewWorkloadTemplate {
mdName := machineDeploymentName(newClusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name)
md, err := p.providerKubectlClient.GetMachineDeployment(ctx, mdName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, nil, err
}
workloadTemplateName = md.Spec.Template.Spec.InfrastructureRef.Name
workloadTemplateNames[workerNodeGroupConfiguration.Name] = workloadTemplateName
} else {
workloadTemplateName = common.WorkerMachineTemplateName(clusterName, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
workloadTemplateNames[workerNodeGroupConfiguration.Name] = workloadTemplateName
}
p.templateBuilder.WorkerNodeGroupMachineSpecs[workerNodeGroupConfiguration.MachineGroupRef.Name] = p.machineConfigs[workerNodeGroupConfiguration.MachineGroupRef.Name].Spec
}
// @TODO: upgrade of external etcd
if newClusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
// etcdMachineConfig := p.machineConfigs[newClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name]
// etcdMachineTmc, err := p.providerKubectlClient.GetEksaTinkerbellMachineConfig(ctx, c.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
// if err != nil {
// return nil, nil, err
// }
// needsNewEtcdTemplate = NeedsNewEtcdTemplate(currentSpec, newClusterSpec, vdc, p.datacenterConfig, etcdMachineTmc, etcdMachineConfig)
/*** @TODO: hardcoding this to false, remove later *****/
needsNewEtcdTemplate = false
if !needsNewEtcdTemplate {
etcdadmCluster, err := p.providerKubectlClient.GetEtcdadmCluster(ctx, workloadCluster, clusterName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, nil, err
}
etcdTemplateName = etcdadmCluster.Spec.InfrastructureTemplate.Name
} else {
/* During a cluster upgrade, etcd machines need to be upgraded first, so that the etcd machines with new spec get created and can be used by controlplane machines
as etcd endpoints. KCP rollout should not start until then. As a temporary solution in the absence of static etcd endpoints, we annotate the etcd cluster as "upgrading",
so that KCP checks this annotation and does not proceed if etcd cluster is upgrading. The etcdadm controller removes this annotation once the etcd upgrade is complete.
*/
err = p.providerKubectlClient.UpdateAnnotation(ctx, "etcdadmcluster", fmt.Sprintf("%s-etcd", clusterName),
map[string]string{etcdv1.UpgradeInProgressAnnotation: "true"},
executables.WithCluster(bootstrapCluster),
executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, nil, err
}
etcdTemplateName = common.EtcdMachineTemplateName(clusterName, p.templateBuilder.now)
}
}
cpOpt := func(values map[string]interface{}) {
values["controlPlaneTemplateName"] = controlPlaneTemplateName
values["controlPlaneSshAuthorizedKey"] = p.machineConfigs[p.clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name].Spec.Users[0].SshAuthorizedKeys[0]
if newClusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
values["etcdSshAuthorizedKey"] = p.machineConfigs[p.clusterConfig.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec.Users[0].SshAuthorizedKeys[0]
}
values["etcdTemplateName"] = etcdTemplateName
}
controlPlaneSpec, err = p.templateBuilder.GenerateCAPISpecControlPlane(newClusterSpec, cpOpt)
if err != nil {
return nil, nil, err
}
workersSpec, err = p.templateBuilder.GenerateCAPISpecWorkers(newClusterSpec, workloadTemplateNames, kubeadmconfigTemplateNames)
if err != nil {
return nil, nil, err
}
if p.isScaleUpDown(currentSpec.Cluster, newClusterSpec.Cluster) {
cpSpec, err := omitTinkerbellMachineTemplate(controlPlaneSpec)
if err == nil {
if wSpec, err := omitTinkerbellMachineTemplate(workersSpec); err == nil {
return cpSpec, wSpec, nil
}
}
}
return controlPlaneSpec, workersSpec, nil
}
func (p *Provider) GenerateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, workloadCluster *types.Cluster, currentSpec, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
controlPlaneSpec, workersSpec, err = p.generateCAPISpecForUpgrade(ctx, bootstrapCluster, workloadCluster, currentSpec, clusterSpec)
if err != nil {
return nil, nil, fmt.Errorf("error generating cluster api spec contents: %v", err)
}
return controlPlaneSpec, workersSpec, nil
}
func (p *Provider) GenerateCAPISpecForCreate(ctx context.Context, _ *types.Cluster, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
controlPlaneSpec, workersSpec, err = p.generateCAPISpecForCreate(ctx, clusterSpec)
if err != nil {
return nil, nil, fmt.Errorf("generating cluster api spec contents: %v", err)
}
return controlPlaneSpec, workersSpec, nil
}
func (p *Provider) generateCAPISpecForCreate(ctx context.Context, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
clusterName := clusterSpec.Cluster.Name
cpOpt := func(values map[string]interface{}) {
values["controlPlaneTemplateName"] = common.CPMachineTemplateName(clusterName, p.templateBuilder.now)
values["controlPlaneSshAuthorizedKey"] = p.machineConfigs[p.clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name].Spec.Users[0].SshAuthorizedKeys[0]
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
values["etcdSshAuthorizedKey"] = p.machineConfigs[p.clusterConfig.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec.Users[0].SshAuthorizedKeys[0]
}
values["etcdTemplateName"] = common.EtcdMachineTemplateName(clusterName, p.templateBuilder.now)
}
controlPlaneSpec, err = p.templateBuilder.GenerateCAPISpecControlPlane(clusterSpec, cpOpt)
if err != nil {
return nil, nil, err
}
workloadTemplateNames := make(map[string]string, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
kubeadmconfigTemplateNames := make(map[string]string, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
workloadTemplateNames[workerNodeGroupConfiguration.Name] = common.WorkerMachineTemplateName(clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = common.KubeadmConfigTemplateName(clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
p.templateBuilder.WorkerNodeGroupMachineSpecs[workerNodeGroupConfiguration.MachineGroupRef.Name] = p.machineConfigs[workerNodeGroupConfiguration.MachineGroupRef.Name].Spec
}
workersSpec, err = p.templateBuilder.GenerateCAPISpecWorkers(clusterSpec, workloadTemplateNames, kubeadmconfigTemplateNames)
if err != nil {
return nil, nil, err
}
return controlPlaneSpec, workersSpec, nil
}
func (p *Provider) needsNewMachineTemplate(ctx context.Context, workloadCluster *types.Cluster, currentSpec, newClusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, vdc *v1alpha1.TinkerbellDatacenterConfig, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration) (bool, error) {
if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
return needsNewWorkloadTemplate(currentSpec, newClusterSpec), nil
}
return true, nil
}
func (p *Provider) needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration) (bool, error) {
if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
existingWorkerNodeGroupConfig := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]
return needsNewKubeadmConfigTemplate(&workerNodeGroupConfiguration, &existingWorkerNodeGroupConfig), nil
}
return true, nil
}
func machineDeploymentName(clusterName, nodeGroupName string) string {
return fmt.Sprintf("%s-%s", clusterName, nodeGroupName)
}
// nolint:gocyclo
func buildTemplateMapCP(
clusterSpec *cluster.Spec,
controlPlaneMachineSpec,
etcdMachineSpec v1alpha1.TinkerbellMachineConfigSpec,
cpTemplateOverride,
etcdTemplateOverride string,
datacenterSpec v1alpha1.TinkerbellDatacenterConfigSpec,
) (map[string]interface{}, error) {
bundle := clusterSpec.VersionsBundle
format := "cloud-config"
apiServerExtraArgs := clusterapi.OIDCToExtraArgs(clusterSpec.OIDCConfig).
Append(clusterapi.AwsIamAuthExtraArgs(clusterSpec.AWSIamConfig)).
Append(clusterapi.PodIAMAuthExtraArgs(clusterSpec.Cluster.Spec.PodIAMConfig))
// LoadBalancerClass is feature gated in K8S v1.21 and needs to be enabled manually
if clusterSpec.Cluster.Spec.KubernetesVersion == v1alpha1.Kube121 {
apiServerExtraArgs.Append(clusterapi.FeatureGatesExtraArgs("ServiceLoadBalancerClass=true"))
}
kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)).
Append(clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration))
values := map[string]interface{}{
"clusterName": clusterSpec.Cluster.Name,
"controlPlaneEndpointIp": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host,
"controlPlaneReplicas": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count,
"controlPlaneSshAuthorizedKey": controlPlaneMachineSpec.Users[0].SshAuthorizedKeys[0],
"controlPlaneSshUsername": controlPlaneMachineSpec.Users[0].Name,
"eksaSystemNamespace": constants.EksaSystemNamespace,
"format": format,
"kubernetesVersion": bundle.KubeDistro.Kubernetes.Tag,
"kubeVipImage": bundle.Tinkerbell.KubeVip.VersionedImage(),
"podCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks,
"serviceCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks,
"apiserverExtraArgs": apiServerExtraArgs.ToPartialYaml(),
"baseRegistry": "", // TODO: need to get this values for creating template IMAGE_URL
"osDistro": "", // TODO: need to get this values for creating template IMAGE_URL
"osVersion": "", // TODO: need to get this values for creating template IMAGE_URL
"kubernetesRepository": bundle.KubeDistro.Kubernetes.Repository,
"corednsRepository": bundle.KubeDistro.CoreDNS.Repository,
"corednsVersion": bundle.KubeDistro.CoreDNS.Tag,
"etcdRepository": bundle.KubeDistro.Etcd.Repository,
"etcdImageTag": bundle.KubeDistro.Etcd.Tag,
"externalEtcdVersion": bundle.KubeDistro.EtcdVersion,
"etcdCipherSuites": crypto.SecureCipherSuitesString(),
"kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(),
"hardwareSelector": controlPlaneMachineSpec.HardwareSelector,
"controlPlaneTaints": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints,
"workerNodeGroupConfigurations": clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations,
"skipLoadBalancerDeployment": datacenterSpec.SkipLoadBalancerDeployment,
"cpSkipLoadBalancerDeployment": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.SkipLoadBalancerDeployment,
}
if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil {
values["upgradeRolloutStrategy"] = true
values["maxSurge"] = clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge
}
if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil {
values, err := populateRegistryMirrorValues(clusterSpec, values)
if err != nil {
return values, err
}
// Replace public.ecr.aws endpoint with the endpoint given in the cluster config file
localRegistry := values["publicMirror"].(string)
cpTemplateOverride = strings.ReplaceAll(cpTemplateOverride, defaultRegistry, localRegistry)
etcdTemplateOverride = strings.ReplaceAll(etcdTemplateOverride, defaultRegistry, localRegistry)
}
if clusterSpec.Cluster.Spec.ProxyConfiguration != nil {
values["proxyConfig"] = true
values["httpProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpProxy
values["httpsProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpsProxy
values["noProxy"] = generateNoProxyList(clusterSpec.Cluster, datacenterSpec)
}
values["controlPlanetemplateOverride"] = cpTemplateOverride
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
values["externalEtcd"] = true
values["externalEtcdReplicas"] = clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count
values["etcdSshUsername"] = etcdMachineSpec.Users[0].Name
values["etcdTemplateOverride"] = etcdTemplateOverride
values["etcdHardwareSelector"] = etcdMachineSpec.HardwareSelector
}
if controlPlaneMachineSpec.OSFamily == v1alpha1.Bottlerocket {
values["format"] = string(v1alpha1.Bottlerocket)
values["pauseRepository"] = bundle.KubeDistro.Pause.Image()
values["pauseVersion"] = bundle.KubeDistro.Pause.Tag()
values["bottlerocketBootstrapRepository"] = bundle.BottleRocketHostContainers.KubeadmBootstrap.Image()
values["bottlerocketBootstrapVersion"] = bundle.BottleRocketHostContainers.KubeadmBootstrap.Tag()
}
if clusterSpec.AWSIamConfig != nil {
values["awsIamAuth"] = true
}
if controlPlaneMachineSpec.HostOSConfiguration != nil {
if controlPlaneMachineSpec.HostOSConfiguration.NTPConfiguration != nil {
values["cpNtpServers"] = controlPlaneMachineSpec.HostOSConfiguration.NTPConfiguration.Servers
}
if controlPlaneMachineSpec.HostOSConfiguration.CertBundles != nil {
values["certBundles"] = controlPlaneMachineSpec.HostOSConfiguration.CertBundles
}
brSettings, err := common.GetCAPIBottlerocketSettingsConfig(controlPlaneMachineSpec.HostOSConfiguration.BottlerocketConfiguration)
if err != nil {
return nil, err
}
values["bottlerocketSettings"] = brSettings
}
return values, nil
}
func buildTemplateMapMD(
clusterSpec *cluster.Spec,
workerNodeGroupMachineSpec v1alpha1.TinkerbellMachineConfigSpec,
workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration,
workerTemplateOverride string,
datacenterSpec v1alpha1.TinkerbellDatacenterConfigSpec,
) (map[string]interface{}, error) {
bundle := clusterSpec.VersionsBundle
format := "cloud-config"
kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
Append(clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration)).
Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf))
values := map[string]interface{}{
"clusterName": clusterSpec.Cluster.Name,
"eksaSystemNamespace": constants.EksaSystemNamespace,
"kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(),
"format": format,
"kubernetesVersion": bundle.KubeDistro.Kubernetes.Tag,
"workerNodeGroupName": workerNodeGroupConfiguration.Name,
"workerSshAuthorizedKey": workerNodeGroupMachineSpec.Users[0].SshAuthorizedKeys[0],
"workerSshUsername": workerNodeGroupMachineSpec.Users[0].Name,
"hardwareSelector": workerNodeGroupMachineSpec.HardwareSelector,
"workerNodeGroupTaints": workerNodeGroupConfiguration.Taints,
}
if workerNodeGroupMachineSpec.OSFamily == v1alpha1.Bottlerocket {
values["format"] = string(v1alpha1.Bottlerocket)
values["pauseRepository"] = bundle.KubeDistro.Pause.Image()
values["pauseVersion"] = bundle.KubeDistro.Pause.Tag()
values["bottlerocketBootstrapRepository"] = bundle.BottleRocketHostContainers.KubeadmBootstrap.Image()
values["bottlerocketBootstrapVersion"] = bundle.BottleRocketHostContainers.KubeadmBootstrap.Tag()
}
if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil {
values, err := populateRegistryMirrorValues(clusterSpec, values)
if err != nil {
return values, err
}
// Replace public.ecr.aws endpoint with the endpoint given in the cluster config file
localRegistry := values["publicMirror"].(string)
workerTemplateOverride = strings.ReplaceAll(workerTemplateOverride, defaultRegistry, localRegistry)
}
if clusterSpec.Cluster.Spec.ProxyConfiguration != nil {
values["proxyConfig"] = true
values["httpProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpProxy
values["httpsProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpsProxy
values["noProxy"] = generateNoProxyList(clusterSpec.Cluster, datacenterSpec)
}
values["workertemplateOverride"] = workerTemplateOverride
if workerNodeGroupMachineSpec.HostOSConfiguration != nil {
if workerNodeGroupMachineSpec.HostOSConfiguration.NTPConfiguration != nil {
values["ntpServers"] = workerNodeGroupMachineSpec.HostOSConfiguration.NTPConfiguration.Servers
}
if workerNodeGroupMachineSpec.HostOSConfiguration.CertBundles != nil {
values["certBundles"] = workerNodeGroupMachineSpec.HostOSConfiguration.CertBundles
}
brSettings, err := common.GetCAPIBottlerocketSettingsConfig(workerNodeGroupMachineSpec.HostOSConfiguration.BottlerocketConfiguration)
if err != nil {
return nil, err
}
values["bottlerocketSettings"] = brSettings
}
return values, nil
}
// omitTinkerbellMachineTemplate removes TinkerbellMachineTemplate API objects from yml, which is
// typically an EKSA cluster configuration.
func omitTinkerbellMachineTemplate(yml []byte) ([]byte, error) {
var filtered []unstructured.Unstructured
r := yamlutil.NewYAMLReader(bufio.NewReader(bytes.NewReader(yml)))
for {
d, err := r.Read()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
var m map[string]interface{}
if err := yamlutil.Unmarshal(d, &m); err != nil {
return nil, err
}
var u unstructured.Unstructured
u.SetUnstructuredContent(m)
// Omit TinkerbellMachineTemplate kind.
if u.GetKind() == TinkerbellMachineTemplateKind {
continue
}
filtered = append(filtered, u)
}
return unstructuredutil.UnstructuredToYaml(filtered)
}
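// exampleOmitTinkerbellMachineTemplate is a minimal sketch (hypothetical objects, not a real
// cluster spec) of the behavior above: the TinkerbellMachineTemplate document is dropped while
// every other document in the multi-document YAML is preserved.
func exampleOmitTinkerbellMachineTemplate() ([]byte, error) {
	yml := []byte(`apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: TinkerbellMachineTemplate
metadata:
  name: example
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: keep-me
`)
	// Only the ConfigMap document remains in the returned YAML.
	return omitTinkerbellMachineTemplate(yml)
}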
func populateRegistryMirrorValues(clusterSpec *cluster.Spec, values map[string]interface{}) (map[string]interface{}, error) {
registryMirror := registrymirror.FromCluster(clusterSpec.Cluster)
values["registryMirrorMap"] = containerd.ToAPIEndpoints(registryMirror.NamespacedRegistryMap)
values["mirrorBase"] = registryMirror.BaseRegistry
values["insecureSkip"] = registryMirror.InsecureSkipVerify
values["publicMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror())
if len(registryMirror.CACertContent) > 0 {
values["registryCACert"] = registryMirror.CACertContent
}
if registryMirror.Auth {
values["registryAuth"] = registryMirror.Auth
username, password, err := config.ReadCredentials()
if err != nil {
return values, err
}
values["registryUsername"] = username
values["registryPassword"] = password
}
return values, nil
}
func getControlPlaneMachineSpec(clusterSpec *cluster.Spec) (*v1alpha1.TinkerbellMachineConfigSpec, error) {
var controlPlaneMachineSpec *v1alpha1.TinkerbellMachineConfigSpec
if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef != nil && clusterSpec.TinkerbellMachineConfigs[clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name] != nil {
controlPlaneMachineSpec = &clusterSpec.TinkerbellMachineConfigs[clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name].Spec
}
return controlPlaneMachineSpec, nil
}
func getWorkerNodeGroupMachineSpec(clusterSpec *cluster.Spec) (map[string]v1alpha1.TinkerbellMachineConfigSpec, error) {
var workerNodeGroupMachineSpec *v1alpha1.TinkerbellMachineConfigSpec
workerNodeGroupMachineSpecs := make(map[string]v1alpha1.TinkerbellMachineConfigSpec, len(clusterSpec.TinkerbellMachineConfigs))
for _, wnConfig := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
if wnConfig.MachineGroupRef != nil && clusterSpec.TinkerbellMachineConfigs[wnConfig.MachineGroupRef.Name] != nil {
workerNodeGroupMachineSpec = &clusterSpec.TinkerbellMachineConfigs[wnConfig.MachineGroupRef.Name].Spec
workerNodeGroupMachineSpecs[wnConfig.MachineGroupRef.Name] = *workerNodeGroupMachineSpec
}
}
return workerNodeGroupMachineSpecs, nil
}
func getEtcdMachineSpec(clusterSpec *cluster.Spec) (*v1alpha1.TinkerbellMachineConfigSpec, error) {
var etcdMachineSpec *v1alpha1.TinkerbellMachineConfigSpec
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef != nil && clusterSpec.TinkerbellMachineConfigs[clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name] != nil {
etcdMachineSpec = &clusterSpec.TinkerbellMachineConfigs[clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec
}
}
return etcdMachineSpec, nil
}
func generateTemplateBuilder(clusterSpec *cluster.Spec) (providers.TemplateBuilder, error) {
controlPlaneMachineSpec, err := getControlPlaneMachineSpec(clusterSpec)
if err != nil {
return nil, errors.Wrap(err, "generating control plane machine spec")
}
workerNodeGroupMachineSpecs, err := getWorkerNodeGroupMachineSpec(clusterSpec)
if err != nil {
return nil, errors.Wrap(err, "generating worker node group machine specs")
}
etcdMachineSpec, err := getEtcdMachineSpec(clusterSpec)
if err != nil {
return nil, errors.Wrap(err, "generating etcd machine spec")
}
templateBuilder := NewTemplateBuilder(&clusterSpec.TinkerbellDatacenter.Spec,
controlPlaneMachineSpec,
etcdMachineSpec,
workerNodeGroupMachineSpecs,
clusterSpec.TinkerbellDatacenter.Spec.TinkerbellIP,
time.Now,
)
return templateBuilder, nil
}
// generateNoProxyList generates the NO_PROXY list for the Tinkerbell provider from the cluster's pod and service CIDRs, the user-provided noProxy entries, the control plane endpoint, and the Tinkerbell IP.
func generateNoProxyList(clusterSpec *v1alpha1.Cluster, datacenterSpec v1alpha1.TinkerbellDatacenterConfigSpec) []string {
capacity := len(clusterSpec.Spec.ClusterNetwork.Pods.CidrBlocks) +
len(clusterSpec.Spec.ClusterNetwork.Services.CidrBlocks) +
len(clusterSpec.Spec.ProxyConfiguration.NoProxy) + 4
noProxyList := make([]string, 0, capacity)
noProxyList = append(noProxyList, clusterSpec.Spec.ClusterNetwork.Pods.CidrBlocks...)
noProxyList = append(noProxyList, clusterSpec.Spec.ClusterNetwork.Services.CidrBlocks...)
noProxyList = append(noProxyList, clusterSpec.Spec.ProxyConfiguration.NoProxy...)
noProxyList = append(noProxyList, clusterapi.NoProxyDefaults()...)
noProxyList = append(noProxyList,
clusterSpec.Spec.ControlPlaneConfiguration.Endpoint.Host,
datacenterSpec.TinkerbellIP,
)
return noProxyList
}
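// exampleGenerateNoProxyList is a hedged sketch of the inputs generateNoProxyList draws from;
// the struct field names are assumed from the eks-anywhere v1alpha1 API as used elsewhere in this
// package, and the values are purely illustrative. The returned slice contains the pod and
// service CIDRs, the user-provided noProxy entries, the clusterapi defaults, the control plane
// endpoint host, and the Tinkerbell IP.
func exampleGenerateNoProxyList() []string {
	c := &v1alpha1.Cluster{
		Spec: v1alpha1.ClusterSpec{
			ClusterNetwork: v1alpha1.ClusterNetwork{
				Pods:     v1alpha1.Pods{CidrBlocks: []string{"192.168.0.0/16"}},
				Services: v1alpha1.Services{CidrBlocks: []string{"10.96.0.0/12"}},
			},
			ProxyConfiguration:        &v1alpha1.ProxyConfiguration{NoProxy: []string{"example.internal"}},
			ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{Endpoint: &v1alpha1.Endpoint{Host: "1.2.3.4"}},
		},
	}
	return generateNoProxyList(c, v1alpha1.TinkerbellDatacenterConfigSpec{TinkerbellIP: "5.6.7.8"})
}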
| 680 |
eks-anywhere | aws | Go | package tinkerbell
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func TestGenerateTemplateBuilder(t *testing.T) {
g := NewWithT(t)
clusterSpec := test.NewFullClusterSpec(t, testClusterConfigFilename)
expectedControlPlaneMachineSpec := &v1alpha1.TinkerbellMachineConfigSpec{
HardwareSelector: map[string]string{"type": "cp"},
TemplateRef: v1alpha1.Ref{
Kind: "TinkerbellTemplateConfig",
Name: "tink-test",
},
OSFamily: "ubuntu",
Users: []v1alpha1.UserConfiguration{
{
Name: "tink-user",
SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="},
},
},
}
gotExpectedControlPlaneMachineSpec, err := getControlPlaneMachineSpec(clusterSpec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(gotExpectedControlPlaneMachineSpec).To(Equal(expectedControlPlaneMachineSpec))
expectedWorkerNodeGroupMachineSpec := map[string]v1alpha1.TinkerbellMachineConfigSpec{
"test-md": {
HardwareSelector: map[string]string{"type": "worker"},
TemplateRef: v1alpha1.Ref{
Kind: "TinkerbellTemplateConfig",
Name: "tink-test",
},
OSFamily: "ubuntu",
Users: []v1alpha1.UserConfiguration{
{
Name: "tink-user",
SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== [email protected]"},
},
},
},
}
gotWorkerNodeGroupMachineSpec, err := getWorkerNodeGroupMachineSpec(clusterSpec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(gotWorkerNodeGroupMachineSpec).To(Equal(expectedWorkerNodeGroupMachineSpec))
gotEtcdMachineSpec, err := getEtcdMachineSpec(clusterSpec)
var expectedEtcdMachineSpec *v1alpha1.TinkerbellMachineConfigSpec
g.Expect(err).NotTo(HaveOccurred())
g.Expect(gotEtcdMachineSpec).To(Equal(expectedEtcdMachineSpec))
}
| 59 |
eks-anywhere | aws | Go | package tinkerbell
import (
"context"
"errors"
"fmt"
"time"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
corev1 "k8s.io/api/core/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/networkutils"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/providers/common"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/rufiounreleased"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
"github.com/aws/eks-anywhere/release/pkg/util/slices"
)
const (
maxRetries = 30
backOffPeriod = 5 * time.Second
)
var (
eksaTinkerbellDatacenterResourceType = fmt.Sprintf("tinkerbelldatacenterconfigs.%s", v1alpha1.GroupVersion.Group)
eksaTinkerbellMachineResourceType = fmt.Sprintf("tinkerbellmachineconfigs.%s", v1alpha1.GroupVersion.Group)
tinkerbellStackPorts = []int{42113, 50051, 50061}
// errExternalEtcdUnsupported is returned from create or upgrade when the user attempts to create
// or upgrade a cluster with an external etcd configuration.
errExternalEtcdUnsupported = errors.New("external etcd configuration is unsupported")
)
type Provider struct {
clusterConfig *v1alpha1.Cluster
datacenterConfig *v1alpha1.TinkerbellDatacenterConfig
machineConfigs map[string]*v1alpha1.TinkerbellMachineConfig
stackInstaller stack.StackInstaller
providerKubectlClient ProviderKubectlClient
templateBuilder *TemplateBuilder
writer filewriter.FileWriter
keyGenerator SSHAuthKeyGenerator
hardwareCSVFile string
catalogue *hardware.Catalogue
tinkerbellIP string
// TODO(chrisdoherty4) Temporarily depend on the netclient until the validator can be injected.
// The netclient is already a dependency, just uncached, because we need it while constructing
// the validator in-line in the constructor.
netClient networkutils.NetClient
forceCleanup bool
skipIpCheck bool
retrier *retrier.Retrier
}
type ProviderKubectlClient interface {
ApplyKubeSpecFromBytesForce(ctx context.Context, cluster *types.Cluster, data []byte) error
ApplyKubeSpecFromBytesWithNamespace(ctx context.Context, cluster *types.Cluster, data []byte, namespace string) error
DeleteEksaDatacenterConfig(ctx context.Context, eksaTinkerbellDatacenterResourceType string, tinkerbellDatacenterConfigName string, kubeconfigFile string, namespace string) error
DeleteEksaMachineConfig(ctx context.Context, eksaTinkerbellMachineResourceType string, tinkerbellMachineConfigName string, kubeconfigFile string, namespace string) error
GetMachineDeployment(ctx context.Context, machineDeploymentName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error)
GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error)
GetEksaTinkerbellDatacenterConfig(ctx context.Context, tinkerbellDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.TinkerbellDatacenterConfig, error)
GetEksaTinkerbellMachineConfig(ctx context.Context, tinkerbellMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.TinkerbellMachineConfig, error)
GetKubeadmControlPlane(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*controlplanev1.KubeadmControlPlane, error)
GetEtcdadmCluster(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*etcdv1.EtcdadmCluster, error)
GetSecret(ctx context.Context, secretObjectName string, opts ...executables.KubectlOpt) (*corev1.Secret, error)
UpdateAnnotation(ctx context.Context, resourceType, objectName string, annotations map[string]string, opts ...executables.KubectlOpt) error
WaitForDeployment(ctx context.Context, cluster *types.Cluster, timeout string, condition string, target string, namespace string) error
GetUnprovisionedTinkerbellHardware(_ context.Context, kubeconfig, namespace string) ([]tinkv1alpha1.Hardware, error)
GetProvisionedTinkerbellHardware(_ context.Context, kubeconfig, namespace string) ([]tinkv1alpha1.Hardware, error)
WaitForRufioMachines(ctx context.Context, cluster *types.Cluster, timeout string, condition string, namespace string) error
SearchTinkerbellMachineConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.TinkerbellMachineConfig, error)
SearchTinkerbellDatacenterConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.TinkerbellDatacenterConfig, error)
	AllTinkerbellHardware(ctx context.Context, kubeconfig string) ([]tinkv1alpha1.Hardware, error)
AllBaseboardManagements(ctx context.Context, kubeconfig string) ([]rufiounreleased.BaseboardManagement, error)
HasCRD(ctx context.Context, kubeconfig, crd string) (bool, error)
DeleteCRD(ctx context.Context, kubeconfig, crd string) error
}
// SSHAuthKeyGenerator generates SSH auth keys and writes them to a FileWriter.
type SSHAuthKeyGenerator interface {
GenerateSSHAuthKey(filewriter.FileWriter) (string, error)
}
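// NewProvider builds a Tinkerbell Provider from the datacenter, machine, and cluster
// configurations, wiring in a default stack installer, hardware catalogue, net client, and retrier.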
func NewProvider(
datacenterConfig *v1alpha1.TinkerbellDatacenterConfig,
machineConfigs map[string]*v1alpha1.TinkerbellMachineConfig,
clusterConfig *v1alpha1.Cluster,
hardwareCSVPath string,
writer filewriter.FileWriter,
docker stack.Docker,
helm stack.Helm,
providerKubectlClient ProviderKubectlClient,
tinkerbellIP string,
now types.NowFunc,
forceCleanup bool,
skipIpCheck bool,
) (*Provider, error) {
var controlPlaneMachineSpec, workerNodeGroupMachineSpec, etcdMachineSpec *v1alpha1.TinkerbellMachineConfigSpec
if clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef != nil && machineConfigs[clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name] != nil {
controlPlaneMachineSpec = &machineConfigs[clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name].Spec
}
workerNodeGroupMachineSpecs := make(map[string]v1alpha1.TinkerbellMachineConfigSpec, len(machineConfigs))
for _, wnConfig := range clusterConfig.Spec.WorkerNodeGroupConfigurations {
if wnConfig.MachineGroupRef != nil && machineConfigs[wnConfig.MachineGroupRef.Name] != nil {
workerNodeGroupMachineSpec = &machineConfigs[wnConfig.MachineGroupRef.Name].Spec
workerNodeGroupMachineSpecs[wnConfig.MachineGroupRef.Name] = *workerNodeGroupMachineSpec
}
}
if clusterConfig.Spec.ExternalEtcdConfiguration != nil {
if clusterConfig.Spec.ExternalEtcdConfiguration.MachineGroupRef != nil && machineConfigs[clusterConfig.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name] != nil {
etcdMachineSpec = &machineConfigs[clusterConfig.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec
}
}
var proxyConfig *v1alpha1.ProxyConfiguration
if clusterConfig.Spec.ProxyConfiguration != nil {
proxyConfig = &v1alpha1.ProxyConfiguration{
HttpProxy: clusterConfig.Spec.ProxyConfiguration.HttpProxy,
HttpsProxy: clusterConfig.Spec.ProxyConfiguration.HttpsProxy,
NoProxy: generateNoProxyList(clusterConfig, datacenterConfig.Spec),
}
		// The local Tinkerbell IP is only needed during management cluster create and
		// upgrade, and only for the kind bootstrap cluster. Because generateNoProxyList
		// is shared by all cluster operations, the Tinkerbell IP is appended here instead.
if !slices.SliceContains(proxyConfig.NoProxy, tinkerbellIP) {
proxyConfig.NoProxy = append(proxyConfig.NoProxy, tinkerbellIP)
}
	}
return &Provider{
clusterConfig: clusterConfig,
datacenterConfig: datacenterConfig,
machineConfigs: machineConfigs,
stackInstaller: stack.NewInstaller(docker, writer, helm, constants.EksaSystemNamespace, clusterConfig.Spec.ClusterNetwork.Pods.CidrBlocks[0], registrymirror.FromCluster(clusterConfig), proxyConfig),
providerKubectlClient: providerKubectlClient,
templateBuilder: &TemplateBuilder{
datacenterSpec: &datacenterConfig.Spec,
controlPlaneMachineSpec: controlPlaneMachineSpec,
WorkerNodeGroupMachineSpecs: workerNodeGroupMachineSpecs,
etcdMachineSpec: etcdMachineSpec,
tinkerbellIP: tinkerbellIP,
now: now,
},
writer: writer,
hardwareCSVFile: hardwareCSVPath,
		// TODO(chrisdoherty4) Inject the catalogue dependency so we can dynamically construct the
		// indexing capabilities.
catalogue: hardware.NewCatalogue(
hardware.WithHardwareIDIndex(),
hardware.WithHardwareBMCRefIndex(),
hardware.WithBMCNameIndex(),
hardware.WithSecretNameIndex(),
),
tinkerbellIP: tinkerbellIP,
netClient: &networkutils.DefaultNetClient{},
retrier: retrier.NewWithMaxRetries(maxRetries, backOffPeriod),
		// (chrisdoherty4) We're hard-coding the dependency and monkey patching it in tests because the
		// provider isn't very testable right now and we already have tests in the `tinkerbell` package,
		// so we can monkey patch directly. This is very much a hack for testability.
keyGenerator: common.SshAuthKeyGenerator{},
// Behavioral flags.
forceCleanup: forceCleanup,
skipIpCheck: skipIpCheck,
}, nil
}
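// Name returns the name of the Tinkerbell provider.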
func (p *Provider) Name() string {
return constants.TinkerbellProviderName
}
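// DatacenterResourceType returns the fully qualified resource type for TinkerbellDatacenterConfig objects.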
func (p *Provider) DatacenterResourceType() string {
return eksaTinkerbellDatacenterResourceType
}
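// MachineResourceType returns the fully qualified resource type for TinkerbellMachineConfig objects.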
func (p *Provider) MachineResourceType() string {
return eksaTinkerbellMachineResourceType
}
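// UpdateSecrets is currently a no-op for the Tinkerbell provider.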
func (p *Provider) UpdateSecrets(ctx context.Context, cluster *types.Cluster, _ *cluster.Spec) error {
// TODO: implement
return nil
}
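// UpdateKubeConfig is currently a no-op for the Tinkerbell provider.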
func (p *Provider) UpdateKubeConfig(content *[]byte, clusterName string) error {
// TODO: Figure out if something is needed here
return nil
}
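// Version returns the Tinkerbell version from the cluster spec's versions bundle.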
func (p *Provider) Version(clusterSpec *cluster.Spec) string {
return clusterSpec.VersionsBundle.Tinkerbell.Version
}
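// EnvMap returns the environment variables used when templating the CAPT deployment manifests.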
func (p *Provider) EnvMap(spec *cluster.Spec) (map[string]string, error) {
return map[string]string{
		// TINKERBELL_IP is an input to the CAPT deployment and is used as part of default template
		// generation. However, we use custom templates and leverage the template override
		// functionality of CAPT, so this value never gets used.
		//
		// Deployment manifest that requires the env var for replacement:
		// https://github.com/tinkerbell/cluster-api-provider-tinkerbell/blob/main/config/manager/manager.yaml#L23
		//
		// Template override:
		// https://github.com/tinkerbell/cluster-api-provider-tinkerbell/blob/main/controllers/machine.go#L182
		//
		// Env var read once TINKERBELL_IP is set in the deployment manifest:
		// https://github.com/tinkerbell/cluster-api-provider-tinkerbell/blob/main/controllers/machine.go#L192
"TINKERBELL_IP": "IGNORED",
"KUBEADM_BOOTSTRAP_TOKEN_TTL": "120m",
}, nil
}
// SetStackInstaller configures p to use installer for Tinkerbell stack install and upgrade.
func (p *Provider) SetStackInstaller(installer stack.StackInstaller) {
p.stackInstaller = installer
}
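// GetDeployments returns the CAPT deployments managed by the provider, keyed by namespace.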
func (p *Provider) GetDeployments() map[string][]string {
return map[string][]string{
"capt-system": {"capt-controller-manager"},
}
}
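// GetInfrastructureBundle returns the Tinkerbell infrastructure manifest bundle for the given cluster spec.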
func (p *Provider) GetInfrastructureBundle(clusterSpec *cluster.Spec) *types.InfrastructureBundle {
bundle := clusterSpec.VersionsBundle
folderName := fmt.Sprintf("infrastructure-tinkerbell/%s/", bundle.Tinkerbell.Version)
infraBundle := types.InfrastructureBundle{
FolderName: folderName,
Manifests: []releasev1alpha1.Manifest{
bundle.Tinkerbell.Components,
bundle.Tinkerbell.Metadata,
bundle.Tinkerbell.ClusterTemplate,
},
}
return &infraBundle
}
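// DatacenterConfig returns the TinkerbellDatacenterConfig associated with the provider.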
func (p *Provider) DatacenterConfig(_ *cluster.Spec) providers.DatacenterConfig {
return p.datacenterConfig
}
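// MachineConfigs returns the machine configs referenced by the cluster's control plane, etcd, and
// worker node group configurations, annotating each and marking it as managed when applicable.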
func (p *Provider) MachineConfigs(_ *cluster.Spec) []providers.MachineConfig {
configs := make(map[string]providers.MachineConfig, len(p.machineConfigs))
controlPlaneMachineName := p.clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
p.machineConfigs[controlPlaneMachineName].Annotations = map[string]string{p.clusterConfig.ControlPlaneAnnotation(): "true"}
if p.clusterConfig.IsManaged() {
p.machineConfigs[controlPlaneMachineName].SetManagedBy(p.clusterConfig.ManagedBy())
}
configs[controlPlaneMachineName] = p.machineConfigs[controlPlaneMachineName]
if p.clusterConfig.Spec.ExternalEtcdConfiguration != nil {
etcdMachineName := p.clusterConfig.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
p.machineConfigs[etcdMachineName].Annotations = map[string]string{p.clusterConfig.EtcdAnnotation(): "true"}
if etcdMachineName != controlPlaneMachineName {
configs[etcdMachineName] = p.machineConfigs[etcdMachineName]
if p.clusterConfig.IsManaged() {
p.machineConfigs[etcdMachineName].SetManagedBy(p.clusterConfig.ManagedBy())
}
}
}
for _, workerNodeGroupConfiguration := range p.clusterConfig.Spec.WorkerNodeGroupConfigurations {
workerMachineName := workerNodeGroupConfiguration.MachineGroupRef.Name
if _, ok := configs[workerMachineName]; !ok {
configs[workerMachineName] = p.machineConfigs[workerMachineName]
if p.clusterConfig.IsManaged() {
p.machineConfigs[workerMachineName].SetManagedBy(p.clusterConfig.ManagedBy())
}
}
}
return providers.ConfigsMapToSlice(configs)
}
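// ChangeDiff returns the Tinkerbell component version change between the current and new cluster
// specs, or nil if the version is unchanged.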
func (p *Provider) ChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ComponentChangeDiff {
if currentSpec.VersionsBundle.Tinkerbell.Version == newSpec.VersionsBundle.Tinkerbell.Version {
return nil
}
return &types.ComponentChangeDiff{
ComponentName: constants.TinkerbellProviderName,
NewVersion: newSpec.VersionsBundle.Tinkerbell.Version,
OldVersion: currentSpec.VersionsBundle.Tinkerbell.Version,
}
}
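// InstallCustomProviderComponents is a no-op for the Tinkerbell provider.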
func (p *Provider) InstallCustomProviderComponents(ctx context.Context, kubeconfigFile string) error {
return nil
}
package tinkerbell
import (
"context"
"errors"
"fmt"
"os"
"path"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
v1 "k8s.io/api/core/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/filewriter"
filewritermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/mocks"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack"
stackmocks "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
const (
testDataDir = "testdata"
testIP = "5.6.7.8"
)
func givenClusterSpec(t *testing.T, fileName string) *cluster.Spec {
return test.NewFullClusterSpec(t, path.Join(testDataDir, fileName))
}
func givenDatacenterConfig(t *testing.T, fileName string) *v1alpha1.TinkerbellDatacenterConfig {
datacenterConfig, err := v1alpha1.GetTinkerbellDatacenterConfig(path.Join(testDataDir, fileName))
if err != nil {
t.Fatalf("unable to get datacenter config from file: %v", err)
}
return datacenterConfig
}
func givenMachineConfigs(t *testing.T, fileName string) map[string]*v1alpha1.TinkerbellMachineConfig {
machineConfigs, err := v1alpha1.GetTinkerbellMachineConfigs(path.Join(testDataDir, fileName))
if err != nil {
t.Fatalf("unable to get machine configs from file: %v", err)
}
return machineConfigs
}
func assertError(t *testing.T, expected string, err error) {
if err == nil {
t.Fatalf("Expected=<%s> actual=<nil>", expected)
}
actual := err.Error()
if expected != actual {
t.Fatalf("Expected=<%s> actual=<%s>", expected, actual)
}
}
func newProvider(datacenterConfig *v1alpha1.TinkerbellDatacenterConfig, machineConfigs map[string]*v1alpha1.TinkerbellMachineConfig, clusterConfig *v1alpha1.Cluster, writer filewriter.FileWriter, docker stack.Docker, helm stack.Helm, kubectl ProviderKubectlClient, forceCleanup bool) *Provider {
hardwareFile := "./testdata/hardware.csv"
provider, err := NewProvider(
datacenterConfig,
machineConfigs,
clusterConfig,
hardwareFile,
writer,
docker,
helm,
kubectl,
testIP,
test.FakeNow,
forceCleanup,
false,
)
if err != nil {
panic(err)
}
return provider
}
func TestTinkerbellProviderGenerateDeploymentFileWithExternalEtcd(t *testing.T) {
t.Skip("External etcd unsupported for GA")
clusterSpecManifest := "cluster_tinkerbell_external_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_external_etcd.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_md.yaml")
}
func TestTinkerbellProviderWithExternalEtcdFail(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_external_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assert.Error(t, err, "expect validation to fail because external etcd is not supported")
err = provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
assert.Error(t, err, "expect validation to fail because external etcd is not supported")
}
func TestTinkerbellProviderMachineConfigsMissingUserSshKeys(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_missing_ssh_keys.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
keyGenerator := mocks.NewMockSSHAuthKeyGenerator(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
const sshKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="
keyGenerator.EXPECT().GenerateSSHAuthKey(gomock.Any()).Return(sshKey, nil)
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
// Hack: monkey patch the key generator and the stack installer directly for determinism.
provider.keyGenerator = keyGenerator
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_missing_ssh_keys.yaml")
}
func TestTinkerbellProviderGenerateDeploymentFileWithStackedEtcd(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_stacked_etcd.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_md.yaml")
}
func TestTinkerbellProviderGenerateDeploymentFileWithAutoscalerConfiguration(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
wng := &clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0]
ca := &v1alpha1.AutoScalingConfiguration{
MaxCount: 5,
MinCount: 3,
}
wng.AutoScalingConfiguration = ca
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_stacked_etcd.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_autoscaler_md.yaml")
}
func TestTinkerbellProviderGenerateDeploymentFileWithNodeLabels(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_node_labels.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_node_labels.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_md_node_labels.yaml")
}
func TestTinkerbellProviderGenerateDeploymentFileWithNodeTaints(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_node_taints.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_node_taints.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_md_node_taints.yaml")
}
func TestTinkerbellProviderGenerateDeploymentFileMultipleWorkerNodeGroups(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_multiple_node_groups.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_external_etcd.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_tinkerbell_md_multiple_node_groups.yaml")
}
func TestPreCAPIInstallOnBootstrapSuccess(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test", KubeconfigFile: "test.kubeconfig"}
ctx := context.Background()
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().Install(
ctx,
clusterSpec.VersionsBundle.Tinkerbell,
testIP,
"test.kubeconfig",
"",
gomock.Any(),
gomock.Any(),
)
err := provider.PreCAPIInstallOnBootstrap(ctx, cluster, clusterSpec)
if err != nil {
t.Fatalf("failed PreCAPIInstallOnBootstrap: %v", err)
}
}
func TestPostWorkloadInitSuccess(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test", KubeconfigFile: "test.kubeconfig"}
ctx := context.Background()
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().Install(
ctx,
clusterSpec.VersionsBundle.Tinkerbell,
testIP,
"test.kubeconfig",
"",
gomock.Any(),
gomock.Any(),
gomock.Any(),
gomock.Any(),
)
stackInstaller.EXPECT().UninstallLocal(ctx)
err := provider.PostWorkloadInit(ctx, cluster, clusterSpec)
if err != nil {
t.Fatalf("failed PostWorkloadInit: %v", err)
}
}
func TestPostBootstrapSetupSuccess(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test", KubeconfigFile: "test.kubeconfig"}
ctx := context.Background()
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
kubectl.EXPECT().ApplyKubeSpecFromBytesForce(ctx, cluster, gomock.Any())
kubectl.EXPECT().WaitForRufioMachines(ctx, cluster, "5m", "Contactable", gomock.Any()).MaxTimes(2)
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
if err := provider.readCSVToCatalogue(); err != nil {
t.Fatalf("failed to read hardware csv: %v", err)
}
err := provider.PostBootstrapSetup(ctx, provider.clusterConfig, cluster)
if err != nil {
t.Fatalf("failed PostBootstrapSetup: %v", err)
}
}
func TestPostBootstrapSetupWaitForRufioMachinesFail(t *testing.T) {
wantError := errors.New("test error")
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test", KubeconfigFile: "test.kubeconfig"}
ctx := context.Background()
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
kubectl.EXPECT().ApplyKubeSpecFromBytesForce(ctx, cluster, gomock.Any())
kubectl.EXPECT().WaitForRufioMachines(ctx, cluster, "5m", "Contactable", gomock.Any()).Return(wantError)
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
if err := provider.readCSVToCatalogue(); err != nil {
t.Fatalf("failed to read hardware csv: %v", err)
}
err := provider.PostBootstrapSetup(ctx, provider.clusterConfig, cluster)
assert.Error(t, err, "PostBootstrapSetup should fail")
}
func TestPostMoveManagementToBootstrapSuccess(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test", KubeconfigFile: "test.kubeconfig"}
ctx := context.Background()
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
kubectl.EXPECT().WaitForRufioMachines(ctx, cluster, "5m", "Contactable", gomock.Any()).Return(nil).MaxTimes(2)
tt := []struct {
name string
hardwareCSVFile string
}{
{
name: "bmc in hardware csv",
hardwareCSVFile: "./testdata/hardware.csv",
},
{
name: "no bmc in hardware csv",
hardwareCSVFile: "./testdata/hardware_nobmc.csv",
},
}
for _, test := range tt {
t.Run(test.name, func(t *testing.T) {
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.hardwareCSVFile = test.hardwareCSVFile
if err := provider.readCSVToCatalogue(); err != nil {
t.Fatalf("failed to read hardware csv: %v", err)
}
err := provider.PostMoveManagementToBootstrap(ctx, cluster)
if err != nil {
t.Fatalf("failed PostMoveManagementToBootstrap: %v", err)
}
})
}
}
func TestPostMoveManagementToBootstrapWaitForRufioMachinesFail(t *testing.T) {
wantError := errors.New("test error")
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test", KubeconfigFile: "test.kubeconfig"}
ctx := context.Background()
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
kubectl.EXPECT().WaitForRufioMachines(ctx, cluster, "5m", "Contactable", gomock.Any()).Return(wantError)
if err := provider.readCSVToCatalogue(); err != nil {
t.Fatalf("failed to read hardware csv: %v", err)
}
err := provider.PostMoveManagementToBootstrap(ctx, cluster)
assert.Error(t, err, "PostMoveManagementToBootstrap should fail")
}
func TestTinkerbellProviderGenerateDeploymentFileWithFullOIDC(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_full_oidc.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_full_oidc.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_md.yaml")
}
func TestTinkerbellProviderGenerateDeploymentFileWithMinimalOIDC(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_minimal_oidc.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_minimal_oidc.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_md.yaml")
}
func TestTinkerbellProviderGenerateDeploymentFileWithAWSIamConfig(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_awsiam.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_awsiam.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_md.yaml")
}
func TestProviderGenerateDeploymentFileForWithMinimalRegistryMirror(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_minimal_registry_mirror.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_minimal_registry_mirror.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_md_minimal_registry_mirror.yaml")
}
func TestProviderGenerateDeploymentFileForWithRegistryMirrorWithCert(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_registry_mirror_with_cert.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_registry_mirror_with_cert.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_md_registry_mirror_with_cert.yaml")
}
func TestProviderGenerateDeploymentFileForWithRegistryMirrorWithAuth(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_registry_mirror_with_auth.yaml"
if err := os.Setenv("REGISTRY_USERNAME", "username"); err != nil {
t.Fatalf(err.Error())
}
if err := os.Setenv("REGISTRY_PASSWORD", "password"); err != nil {
t.Fatalf(err.Error())
}
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_registry_mirror_with_auth.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_md_registry_mirror_with_auth.yaml")
}
func TestProviderGenerateDeploymentFileForWithBottlerocketMinimalRegistryMirror(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_bottlerocket_minimal_registry_mirror.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_bottlerocket_cp_minimal_registry_mirror.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_bottlerocket_md_minimal_registry_mirror.yaml")
}
func TestProviderGenerateDeploymentFileForWithBottlerocketRegistryMirrorWithCert(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_bottlerocket_registry_mirror_with_cert.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_bottlerocket_cp_registry_mirror_with_cert.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_bottlerocket_md_registry_mirror_with_cert.yaml")
}
func TestProviderGenerateDeploymentFileForWithBottlerocketRegistryMirrorWithAuth(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_bottlerocket_registry_mirror_with_auth.yaml"
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_bottlerocket_cp_registry_mirror_with_auth.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_bottlerocket_md_registry_mirror_with_auth.yaml")
}
func TestProviderGenerateDeploymentFileForSingleNodeCluster(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_single_node.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
if len(md) != 0 {
t.Fatalf("expect nothing to be generated for worker node")
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_single_node.yaml")
}
func TestProviderGenerateDeploymentFileForSingleNodeClusterSkipLB(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_single_node_skip_lb.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
if len(md) != 0 {
t.Fatalf("expect nothing to be generated for worker node")
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_single_node_skip_lb.yaml")
}
func TestTinkerbellTemplate_isScaleUpDownSuccess(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
newClusterSpec := clusterSpec.DeepCopy()
newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(2)
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
assert.True(t, provider.isScaleUpDown(clusterSpec.Cluster, newClusterSpec.Cluster), "expected scale up down true")
}
func TestSetupAndValidateCreateWorkloadClusterSuccess(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
provider.providerKubectlClient = kubectl
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
for _, config := range machineConfigs {
kubectl.EXPECT().SearchTinkerbellMachineConfig(ctx, config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.TinkerbellMachineConfig{}, nil)
}
kubectl.EXPECT().SearchTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.TinkerbellDatacenterConfig{}, nil)
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetProvisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetEksaCluster(ctx, clusterSpec.ManagementCluster, clusterSpec.ManagementCluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(datacenterConfig, nil)
kubectl.EXPECT().ApplyKubeSpecFromBytesForce(ctx, clusterSpec.ManagementCluster, gomock.Any())
kubectl.EXPECT().WaitForRufioMachines(ctx, clusterSpec.ManagementCluster, "5m", "Contactable", constants.EksaSystemNamespace)
	err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
	assert.NoError(t, err, "No error should be returned")
}
func TestSetupAndValidateCreateWorkloadClusterFailsIfMachineExists(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
provider.providerKubectlClient = kubectl
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
idx := 0
var existingMachine string
for _, config := range machineConfigs {
if idx == 0 {
kubectl.EXPECT().SearchTinkerbellMachineConfig(ctx, config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.TinkerbellMachineConfig{config}, nil)
existingMachine = config.Name
} else {
kubectl.EXPECT().SearchTinkerbellMachineConfig(ctx, config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.TinkerbellMachineConfig{}, nil).MaxTimes(1)
}
idx++
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assertError(t, fmt.Sprintf("TinkerbellMachineConfig %s already exists", existingMachine), err)
}
func TestSetupAndValidateCreateWorkloadClusterFailsIfDatacenterExists(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
provider.providerKubectlClient = kubectl
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
for _, config := range machineConfigs {
kubectl.EXPECT().SearchTinkerbellMachineConfig(ctx, config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.TinkerbellMachineConfig{}, nil)
}
kubectl.EXPECT().SearchTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.TinkerbellDatacenterConfig{datacenterConfig}, nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assertError(t, fmt.Sprintf("TinkerbellDatacenterConfig %s already exists", datacenterConfig.Name), err)
}
func TestSetupAndValidateCreateWorkloadClusterFailsIfDatacenterConfigError(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
provider.providerKubectlClient = kubectl
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
for _, config := range machineConfigs {
kubectl.EXPECT().SearchTinkerbellMachineConfig(ctx, config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.TinkerbellMachineConfig{}, nil)
}
kubectl.EXPECT().SearchTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.TinkerbellDatacenterConfig{}, errors.New("error getting TinkerbellDatacenterConfig"))
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assertError(t, "error getting TinkerbellDatacenterConfig", err)
}
func TestSetupAndValidateCreateWorkloadClusterErrorUnprovisionedHardware(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
provider.providerKubectlClient = kubectl
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
for _, config := range machineConfigs {
kubectl.EXPECT().SearchTinkerbellMachineConfig(ctx, config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.TinkerbellMachineConfig{}, nil)
}
kubectl.EXPECT().SearchTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.TinkerbellDatacenterConfig{}, nil)
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, errors.New("error"))
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assertError(t, "retrieving unprovisioned hardware: error", err)
}
func TestSetupAndValidateCreateWorkloadClusterErrorProvisionedHardware(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
provider.providerKubectlClient = kubectl
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
for _, config := range machineConfigs {
kubectl.EXPECT().SearchTinkerbellMachineConfig(ctx, config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.TinkerbellMachineConfig{}, nil)
}
kubectl.EXPECT().SearchTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.TinkerbellDatacenterConfig{}, nil)
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetProvisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, errors.New("error"))
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assertError(t, "retrieving provisioned hardware: error", err)
}
func TestSetupAndValidateUpgradeClusterErrorValidateClusterSpec(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
provider.providerKubectlClient = kubectl
cluster.KubeconfigFile = "kc.kubeconfig"
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, cluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetProvisionedTinkerbellHardware(ctx, cluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
provider.datacenterConfig.Spec.TinkerbellIP = ""
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
assertError(t, "TinkerbellDatacenterConfig: missing spec.tinkerbellIP field", err)
}
func TestSetupAndValidateUpgradeWorkloadClusterErrorApplyHardware(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
provider.providerKubectlClient = kubectl
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
cluster.KubeconfigFile = "kc.kubeconfig"
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetProvisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().ApplyKubeSpecFromBytesForce(ctx, clusterSpec.ManagementCluster, gomock.Any()).Return(fmt.Errorf("error"))
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
assertError(t, "applying hardware yaml: error", err)
}
func TestSetupAndValidateUpgradeWorkloadClusterErrorBMC(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
provider.providerKubectlClient = kubectl
provider.hardwareCSVFile = "testdata/hardware.csv"
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
cluster.KubeconfigFile = "kc.kubeconfig"
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetProvisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().ApplyKubeSpecFromBytesForce(ctx, clusterSpec.ManagementCluster, gomock.Any())
kubectl.EXPECT().WaitForRufioMachines(ctx, cluster, "5m", "Contactable", gomock.Any()).Return(fmt.Errorf("error"))
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
assertError(t, "waiting for baseboard management to be contactable: error", err)
}
func TestSetupAndValidateCreateWorkloadClusterErrorManagementCluster(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
provider.providerKubectlClient = kubectl
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
for _, config := range machineConfigs {
kubectl.EXPECT().SearchTinkerbellMachineConfig(ctx, config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.TinkerbellMachineConfig{}, nil)
}
kubectl.EXPECT().SearchTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.TinkerbellDatacenterConfig{}, nil)
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetProvisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetEksaCluster(ctx, clusterSpec.ManagementCluster, clusterSpec.ManagementCluster.Name).Return(clusterSpec.Cluster, fmt.Errorf("error getting management cluster data"))
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assertError(t, "error getting management cluster data", err)
}
func TestSetupAndValidateCreateWorkloadClusterErrorUnspecifiedTinkerbellIP(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
// Set workload cluster tinkerbell ip to empty string
clusterSpec.TinkerbellDatacenter.Spec.TinkerbellIP = ""
clusterDatacenterConfig := datacenterConfig.DeepCopy()
clusterDatacenterConfig.Spec.TinkerbellIP = ""
provider := newProvider(clusterDatacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
provider.providerKubectlClient = kubectl
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
for _, config := range machineConfigs {
kubectl.EXPECT().SearchTinkerbellMachineConfig(ctx, config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.TinkerbellMachineConfig{}, nil)
}
kubectl.EXPECT().SearchTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.TinkerbellDatacenterConfig{}, nil)
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetProvisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetEksaCluster(ctx, clusterSpec.ManagementCluster, clusterSpec.ManagementCluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(datacenterConfig, nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assertError(t, "TinkerbellDatacenterConfig: missing spec.tinkerbellIP field", err)
}
func TestSetupAndValidateCreateWorkloadClusterErrorManagementClusterTinkerbellIP(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
provider.providerKubectlClient = kubectl
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
for _, config := range machineConfigs {
kubectl.EXPECT().SearchTinkerbellMachineConfig(ctx, config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.TinkerbellMachineConfig{}, nil)
}
kubectl.EXPECT().SearchTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.TinkerbellDatacenterConfig{}, nil)
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetProvisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetEksaCluster(ctx, clusterSpec.ManagementCluster, clusterSpec.ManagementCluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(datacenterConfig, fmt.Errorf("error"))
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assertError(t, "getting TinkerbellIP of management cluster: error", err)
}
func TestSetupAndValidateCreateWorkloadClusterErrorDifferentTinkerbellIP(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
managementDatacenterConfig := datacenterConfig.DeepCopy()
managementDatacenterConfig.Spec.TinkerbellIP = "different.ip"
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
provider.providerKubectlClient = kubectl
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
for _, config := range machineConfigs {
kubectl.EXPECT().SearchTinkerbellMachineConfig(ctx, config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.TinkerbellMachineConfig{}, nil)
}
kubectl.EXPECT().SearchTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.TinkerbellDatacenterConfig{}, nil)
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetProvisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetEksaCluster(ctx, clusterSpec.ManagementCluster, clusterSpec.ManagementCluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaTinkerbellDatacenterConfig(ctx, datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(managementDatacenterConfig, nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assertError(t, "TinkerbellIP 5.6.7.8 does not match management cluster ip different.ip", err)
}
func TestProviderGenerateDeploymentFileForWithProxy(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_proxy.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_cp_proxy.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_cluster_tinkerbell_md_proxy.yaml")
}
func TestProviderGenerateDeploymentFileForBottleRocketWithNTPConfig(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_ntp_config.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_ntp_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_ntp_config_md.yaml")
}
func TestProviderGenerateDeploymentFileForUbuntuWithNTPConfig(t *testing.T) {
clusterSpecManifest := "cluster_ubuntu_ntp_config.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_ubuntu_ntp_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_ubuntu_ntp_config_md.yaml")
}
func TestProviderGenerateDeploymentFileForBottlerocketWithBottlerocketSettingsConfig(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_settings_config.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_settings_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_settings_config_md.yaml")
}
func TestTinkerbellProviderGenerateCAPISpecForCreateWithPodIAMConfig(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_awsiam.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
clusterSpec.Cluster.Spec.PodIAMConfig = &v1alpha1.PodIAMConfig{ServiceAccountIssuer: "https://test"}
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_tinkerbell_pod_iam_config.yaml")
}
func TestProviderGenerateDeploymentFileForBottlerocketWithKernelSettingsConfig(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_kernel_settings_config.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_kernel_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_kernel_config_md.yaml")
}
func TestProviderGenerateDeploymentFileForWhenKubeVipDisabled(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_disable_kube_vip.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_"+clusterSpecManifest)
}
func TestProviderGenerateDeploymentFileForBottlerocketWithCertBundlesConfig(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_cert_bundles_config.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
forceCleanup := false
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
ctx := context.Background()
provider := newProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl, forceCleanup)
provider.stackInstaller = stackInstaller
stackInstaller.EXPECT().CleanupLocalBoots(ctx, forceCleanup)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_cert_bundles_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_cert_bundles_config_md.yaml")
}
func TestTinkerbellProvider_GenerateCAPISpecForUpgrade_RegistryMirror(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_upgrade_registry_mirror.yaml"
mockCtrl := gomock.NewController(t)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
cluster := &types.Cluster{Name: "test"}
kubeadmControlPlane := &controlplanev1.KubeadmControlPlane{
Spec: controlplanev1.KubeadmControlPlaneSpec{
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: v1.ObjectReference{
Name: "test-control-plane-template-1234567890000",
},
},
},
}
machineDeployment := &clusterv1.MachineDeployment{
Spec: clusterv1.MachineDeploymentSpec{
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
Name: "test-md-0-template-1234567890000",
},
},
InfrastructureRef: v1.ObjectReference{
Name: "test-md-0-1234567890000",
},
},
},
},
}
ctx := context.Background()
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
updatedClusterSpec := clusterSpec.DeepCopy()
*clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count++
kubectl.EXPECT().
GetUnprovisionedTinkerbellHardware(gomock.Any(), gomock.Any(), gomock.Any()).
Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().
GetProvisionedTinkerbellHardware(gomock.Any(), gomock.Any(), gomock.Any()).
Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().
GetEksaCluster(gomock.Any(), gomock.Any(), gomock.Any()).
Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().
GetEksaTinkerbellDatacenterConfig(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
Return(datacenterConfig, nil)
kubectl.EXPECT().
GetKubeadmControlPlane(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
Return(kubeadmControlPlane, nil)
kubectl.EXPECT().
GetMachineDeployment(gomock.Any(), gomock.Any(), gomock.Any()).
Return(machineDeployment, nil)
kubectl.EXPECT().
GetMachineDeployment(gomock.Any(), gomock.Any(), gomock.Any()).
Return(machineDeployment, nil)
provider := newProvider(datacenterConfig, machineConfigs, updatedClusterSpec.Cluster, writer, docker, helm, kubectl, false)
provider.stackInstaller = stackInstaller
if err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, updatedClusterSpec, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, _, err := provider.GenerateCAPISpecForUpgrade(context.Background(), cluster, cluster, clusterSpec, updatedClusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_tinkerbell_upgrade_registry_mirror.yaml")
}
package tinkerbell
import (
"context"
"errors"
"fmt"
"reflect"
rufiov1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/rufiounreleased"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/yaml"
)
func needsNewControlPlaneTemplate(oldSpec, newSpec *cluster.Spec) bool {
// Another option is to generate MachineTemplates based on the old and new eksa spec,
// remove the name field and compare them with DeepEqual.
// We plan to approach it this way since it's more flexible for adding/removing fields and easier to test for validation.
if oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion {
return true
}
if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number {
return true
}
return false
}
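// needsNewWorkloadTemplate determines whether worker machine templates must be regenerated
// due to a Kubernetes version, bundle number, taint, or label change.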
func needsNewWorkloadTemplate(oldSpec, newSpec *cluster.Spec) bool {
if oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion {
return true
}
if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number {
return true
}
if !v1alpha1.WorkerNodeGroupConfigurationSliceTaintsEqual(oldSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newSpec.Cluster.Spec.WorkerNodeGroupConfigurations) ||
!v1alpha1.WorkerNodeGroupConfigurationsLabelsMapEqual(oldSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newSpec.Cluster.Spec.WorkerNodeGroupConfigurations) {
return true
}
return false
}
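// needsNewKubeadmConfigTemplate determines whether a worker node group's taints or labels
// changed, requiring a new KubeadmConfigTemplate.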
func needsNewKubeadmConfigTemplate(newWorkerNodeGroup, oldWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration) bool {
return !v1alpha1.TaintsSliceEqual(newWorkerNodeGroup.Taints, oldWorkerNodeGroup.Taints) || !v1alpha1.MapEqual(newWorkerNodeGroup.Labels, oldWorkerNodeGroup.Labels)
}
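// SetupAndValidateUpgradeCluster performs the setup and validations required before an upgrade:
// it rejects external etcd, configures SSH keys, loads any hardware CSV into the catalogue,
// reconciles the catalogue against hardware already in the cluster and, for workload clusters,
// applies the hardware and waits for BMC connections to become contactable.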
func (p *Provider) SetupAndValidateUpgradeCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, currentClusterSpec *cluster.Spec) error {
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
return errExternalEtcdUnsupported
}
if err := p.configureSshKeys(); err != nil {
return err
}
// If we've been given a CSV with additional hardware for the cluster, validate it and
// write it to the catalogue so it can be used for further processing.
if p.hardwareCSVIsProvided() {
machineCatalogueWriter := hardware.NewMachineCatalogueWriter(p.catalogue)
machines, err := hardware.NewNormalizedCSVReaderFromFile(p.hardwareCSVFile)
if err != nil {
return err
}
machineValidator := hardware.NewDefaultMachineValidator()
if err := hardware.TranslateAll(machines, machineCatalogueWriter, machineValidator); err != nil {
return err
}
}
// Retrieve all unprovisioned hardware from the existing cluster and populate the catalogue so
// it can be considered for the upgrade.
hardware, err := p.providerKubectlClient.GetUnprovisionedTinkerbellHardware(
ctx,
cluster.KubeconfigFile,
constants.EksaSystemNamespace,
)
if err != nil {
return fmt.Errorf("retrieving unprovisioned hardware: %v", err)
}
for i := range hardware {
if err := p.catalogue.InsertHardware(&hardware[i]); err != nil {
return err
}
}
// Retrieve all provisioned hardware from the existing cluster so any duplicates supplied
// via the hardware CSV can be removed from the catalogue below.
hardware, err = p.providerKubectlClient.GetProvisionedTinkerbellHardware(
ctx,
cluster.KubeconfigFile,
constants.EksaSystemNamespace,
)
if err != nil {
return fmt.Errorf("retrieving provisioned hardware: %v", err)
}
// Remove any provisioned hardware from the catalogue if it was repeated in the hardware CSV input.
if err := p.catalogue.RemoveHardwares(hardware); err != nil {
return err
}
if err := p.validateAvailableHardwareForUpgrade(ctx, currentClusterSpec, clusterSpec); err != nil {
return err
}
if p.clusterConfig.IsManaged() {
// Update the stack Helm NO_PROXY environment variable by appending the management cluster's control plane endpoint IP when upgrading a workload cluster.
if clusterSpec.Cluster.Spec.ProxyConfiguration != nil {
managementCluster, err := p.providerKubectlClient.GetEksaCluster(ctx, clusterSpec.ManagementCluster, clusterSpec.Cluster.Spec.ManagementCluster.Name)
if err != nil {
return err
}
p.stackInstaller.AddNoProxyIP(managementCluster.Spec.ControlPlaneConfiguration.Endpoint.Host)
}
if err := p.applyHardwareUpgrade(ctx, clusterSpec.ManagementCluster); err != nil {
return err
}
if p.catalogue.TotalHardware() > 0 && p.catalogue.AllHardware()[0].Spec.BMCRef != nil {
err = p.providerKubectlClient.WaitForRufioMachines(ctx, cluster, "5m", "Contactable", constants.EksaSystemNamespace)
if err != nil {
return fmt.Errorf("waiting for baseboard management to be contactable: %v", err)
}
}
}
return nil
}
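// validateAvailableHardwareForUpgrade ensures the hardware catalogue can satisfy the new
// cluster spec, adding an extra-hardware assertion when the Kubernetes version changes
// (rolling upgrade) and scale up/down assertions against the current spec.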
func (p *Provider) validateAvailableHardwareForUpgrade(ctx context.Context, currentSpec, newClusterSpec *cluster.Spec) (err error) {
clusterSpecValidator := NewClusterSpecValidator(
HardwareSatisfiesOnlyOneSelectorAssertion(p.catalogue),
)
rollingUpgrade := false
if currentSpec.Cluster.Spec.KubernetesVersion != newClusterSpec.Cluster.Spec.KubernetesVersion {
clusterSpecValidator.Register(ExtraHardwareAvailableAssertionForRollingUpgrade(p.catalogue))
rollingUpgrade = true
}
currentTinkerbellSpec := NewClusterSpec(currentSpec, currentSpec.TinkerbellMachineConfigs, currentSpec.TinkerbellDatacenter)
clusterSpecValidator.Register(AssertionsForScaleUpDown(p.catalogue, &ValidatableTinkerbellClusterSpec{currentTinkerbellSpec}, rollingUpgrade))
tinkerbellClusterSpec := NewClusterSpec(newClusterSpec, p.machineConfigs, p.datacenterConfig)
if err := clusterSpecValidator.Validate(tinkerbellClusterSpec); err != nil {
return err
}
return nil
}
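// PostBootstrapDeleteForUpgrade uninstalls the local Tinkerbell stack once the bootstrap
// cluster no longer needs it during an upgrade.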
func (p *Provider) PostBootstrapDeleteForUpgrade(ctx context.Context) error {
if err := p.stackInstaller.UninstallLocal(ctx); err != nil {
return err
}
return nil
}
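// PostBootstrapSetupUpgrade applies the hardware catalogue to the given cluster after the
// bootstrap cluster has been set up for an upgrade.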
func (p *Provider) PostBootstrapSetupUpgrade(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
return p.applyHardwareUpgrade(ctx, cluster)
}
// applyHardwareUpgrade applies all hardware in the catalogue to the cluster.
func (p *Provider) applyHardwareUpgrade(ctx context.Context, cluster *types.Cluster) error {
allHardware := p.catalogue.AllHardware()
if len(allHardware) == 0 {
return nil
}
hardwareSpec, err := hardware.MarshalCatalogue(p.catalogue)
if err != nil {
return fmt.Errorf("failed marshalling resources for hardware spec: %v", err)
}
err = p.providerKubectlClient.ApplyKubeSpecFromBytesForce(ctx, cluster, hardwareSpec)
if err != nil {
return fmt.Errorf("applying hardware yaml: %v", err)
}
return nil
}
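// PostMoveManagementToBootstrap waits for Rufio machines on the bootstrap cluster to report a
// Contactable condition when the catalogue contains hardware with BMC references.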
func (p *Provider) PostMoveManagementToBootstrap(ctx context.Context, bootstrapCluster *types.Cluster) error {
// Check if the hardware in the catalogue has a BMCRef. Since we only allow either all hardware with BMC
// or no hardware with BMC, it's sufficient to check the first hardware.
if p.catalogue.TotalHardware() > 0 && p.catalogue.AllHardware()[0].Spec.BMCRef != nil {
// Wait to ensure all the new and existing baseboard management connections are valid.
err := p.providerKubectlClient.WaitForRufioMachines(ctx, bootstrapCluster, "5m", "Contactable", constants.EksaSystemNamespace)
if err != nil {
return fmt.Errorf("waiting for baseboard management to be contactable: %v", err)
}
}
return nil
}
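// RunPostControlPlaneUpgrade satisfies the Provider interface. It is currently a no-op for the
// Tinkerbell provider.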
func (p *Provider) RunPostControlPlaneUpgrade(ctx context.Context, oldClusterSpec *cluster.Spec, clusterSpec *cluster.Spec, workloadCluster *types.Cluster, managementCluster *types.Cluster) error {
// @TODO: do we need this for bare metal upgrade?
// Use retrier so that cluster upgrade does not fail due to any intermittent failure while connecting to kube-api server
// This is unfortunate, but ClusterResourceSets don't support any type of reapply of the resources they manage
// Even if we create a new ClusterResourceSet, if such resources already exist in the cluster, they won't be reapplied
// The long term solution is to add this capability to the cluster-api controller,
// with a new mode like "ReApplyOnChanges" or "ReApplyOnCreate" vs the current "ReApplyOnce"
/* err := p.retrier.Retry(
func() error {
return p.resourceSetManager.ForceUpdate(ctx, resourceSetName(clusterSpec), constants.EksaSystemNamespace, managementCluster, workloadCluster)
},
)
if err != nil {
return fmt.Errorf("failed updating the tinkerbell provider resource set post upgrade: %v", err)
} */
return nil
}
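// ValidateNewSpec validates the new cluster spec against the cluster's existing configuration,
// enforcing that machine configs cannot be added or removed, that machine config fields and the
// Tinkerbell IP remain immutable, and that OS image and hook image URLs only change alongside a
// Kubernetes version change.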
func (p *Provider) ValidateNewSpec(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
prevSpec, err := p.providerKubectlClient.GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name)
if err != nil {
return err
}
prevDatacenterConfig, err := p.providerKubectlClient.GetEksaTinkerbellDatacenterConfig(ctx, prevSpec.Spec.DatacenterRef.Name, cluster.KubeconfigFile, prevSpec.Namespace)
if err != nil {
return err
}
oSpec := prevDatacenterConfig.Spec
nSpec := p.datacenterConfig.Spec
prevMachineConfigRefs := machineRefSliceToMap(prevSpec.MachineConfigRefs())
for _, machineConfigRef := range clusterSpec.Cluster.MachineConfigRefs() {
machineConfig, ok := p.machineConfigs[machineConfigRef.Name]
if !ok {
return fmt.Errorf("cannot find machine config %s in tinkerbell provider machine configs", machineConfigRef.Name)
}
if _, ok = prevMachineConfigRefs[machineConfig.Name]; !ok {
return fmt.Errorf("cannot add or remove MachineConfigs as part of upgrade")
}
err = p.validateMachineConfigImmutability(ctx, cluster, machineConfig, clusterSpec)
if err != nil {
return err
}
}
if nSpec.TinkerbellIP != oSpec.TinkerbellIP {
return fmt.Errorf("spec.TinkerbellIP is immutable. Previous value %s, New value %s", oSpec.TinkerbellIP, nSpec.TinkerbellIP)
}
// For any operation other than a Kubernetes version change, osImageURL and hookImagesURLPath are immutable.
if prevSpec.Spec.KubernetesVersion == clusterSpec.Cluster.Spec.KubernetesVersion {
if nSpec.OSImageURL != oSpec.OSImageURL {
return fmt.Errorf("spec.OSImageURL is immutable. Previous value %s, New value %s", oSpec.OSImageURL, nSpec.OSImageURL)
}
if nSpec.HookImagesURLPath != oSpec.HookImagesURLPath {
return fmt.Errorf("spec.HookImagesURLPath is immutable. Previous value %s, New value %s", oSpec.HookImagesURLPath, nSpec.HookImagesURLPath)
}
}
return nil
}
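// UpgradeNeeded satisfies the Provider interface; the Tinkerbell provider currently always
// reports that no provider-specific upgrade is needed.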
func (p *Provider) UpgradeNeeded(_ context.Context, _, _ *cluster.Spec, _ *types.Cluster) (bool, error) {
// TODO: Figure out if something is needed here
return false, nil
}
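// hardwareCSVIsProvided reports whether a hardware CSV file path has been configured on the provider.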
func (p *Provider) hardwareCSVIsProvided() bool {
return p.hardwareCSVFile != ""
}
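// isScaleUpDown reports whether the control plane or any worker node group count differs
// between the old and new cluster definitions.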
func (p *Provider) isScaleUpDown(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster) bool {
if oldCluster.Spec.ControlPlaneConfiguration.Count != newCluster.Spec.ControlPlaneConfiguration.Count {
return true
}
workerNodeGroupMap := make(map[string]*v1alpha1.WorkerNodeGroupConfiguration)
for i := range oldCluster.Spec.WorkerNodeGroupConfigurations {
workerNodeGroupMap[oldCluster.Spec.WorkerNodeGroupConfigurations[i].Name] = &oldCluster.Spec.WorkerNodeGroupConfigurations[i]
}
for _, nodeGroupNewSpec := range newCluster.Spec.WorkerNodeGroupConfigurations {
if workerNodeGrpOldSpec, ok := workerNodeGroupMap[nodeGroupNewSpec.Name]; ok {
if *nodeGroupNewSpec.Count != *workerNodeGrpOldSpec.Count {
return true
}
}
}
return false
}
/* func (p *Provider) isScaleUpDown(currentSpec *cluster.Spec, newSpec *cluster.Spec) bool {
if currentSpec.Cluster.Spec.ControlPlaneConfiguration.Count != newSpec.Cluster.Spec.ControlPlaneConfiguration.Count {
return true
}
workerNodeGroupMap := make(map[string]*v1alpha1.WorkerNodeGroupConfiguration)
for _, workerNodeGroupConfiguration := range currentSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
workerNodeGroupMap[workerNodeGroupConfiguration.Name] = &workerNodeGroupConfiguration
}
for _, nodeGroupNewSpec := range newSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
if workerNodeGrpOldSpec, ok := workerNodeGroupMap[nodeGroupNewSpec.Name]; ok {
if *nodeGroupNewSpec.Count != *workerNodeGrpOldSpec.Count {
return true
}
}
}
return false
} */
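// validateMachineConfigImmutability ensures the immutable fields of a machine config (OS family,
// first user's name and SSH key, and the hardware selector) are unchanged from the config
// currently applied to the cluster.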
func (p *Provider) validateMachineConfigImmutability(ctx context.Context, cluster *types.Cluster, newConfig *v1alpha1.TinkerbellMachineConfig, clusterSpec *cluster.Spec) error {
prevMachineConfig, err := p.providerKubectlClient.GetEksaTinkerbellMachineConfig(ctx, newConfig.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace)
if err != nil {
return err
}
if newConfig.Spec.OSFamily != prevMachineConfig.Spec.OSFamily {
return fmt.Errorf("spec.osFamily is immutable. Previous value %v, New value %v", prevMachineConfig.Spec.OSFamily, newConfig.Spec.OSFamily)
}
if newConfig.Spec.Users[0].SshAuthorizedKeys[0] != prevMachineConfig.Spec.Users[0].SshAuthorizedKeys[0] {
return fmt.Errorf("spec.Users[0].SshAuthorizedKeys[0] is immutable. Previous value %s, New value %s", prevMachineConfig.Spec.Users[0].SshAuthorizedKeys[0], newConfig.Spec.Users[0].SshAuthorizedKeys[0])
}
if newConfig.Spec.Users[0].Name != prevMachineConfig.Spec.Users[0].Name {
return fmt.Errorf("spec.Users[0].Name is immutable. Previous value %s, New value %s", prevMachineConfig.Spec.Users[0].Name, newConfig.Spec.Users[0].Name)
}
if !reflect.DeepEqual(newConfig.Spec.HardwareSelector, prevMachineConfig.Spec.HardwareSelector) {
return fmt.Errorf("spec.HardwareSelector is immutable. Previous value %v, New value %v", prevMachineConfig.Spec.HardwareSelector, newConfig.Spec.HardwareSelector)
}
return nil
}
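// machineRefSliceToMap indexes machine config references by name for quick lookup.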
func machineRefSliceToMap(machineRefs []v1alpha1.Ref) map[string]v1alpha1.Ref {
refMap := make(map[string]v1alpha1.Ref, len(machineRefs))
for _, ref := range machineRefs {
refMap[ref.Name] = ref
}
return refMap
}
// PreCoreComponentsUpgrade satisfies the Provider interface.
func (p *Provider) PreCoreComponentsUpgrade(
ctx context.Context,
cluster *types.Cluster,
clusterSpec *cluster.Spec,
) error {
// When upgrading a workload cluster, the cluster object could be nil. Noop if it is.
if cluster == nil {
logger.V(4).Info("Cluster object is nil, assuming it is a workload cluster with no " +
"Tinkerbell stack to upgrade")
return nil
}
if clusterSpec == nil {
return errors.New("cluster spec is nil")
}
// Attempt the upgrade. This should upgrade the stack in the management cluster by updating
// images, installing new CRDs and possibly removing old ones.
err := p.stackInstaller.Upgrade(
ctx,
clusterSpec.VersionsBundle.Tinkerbell,
p.datacenterConfig.Spec.TinkerbellIP,
cluster.KubeconfigFile,
p.datacenterConfig.Spec.HookImagesURLPath,
)
if err != nil {
return fmt.Errorf("upgrading stack: %v", err)
}
hasBaseboardManagement, err := p.providerKubectlClient.HasCRD(
ctx,
rufiounreleased.BaseboardManagementResourceName,
cluster.KubeconfigFile,
)
if err != nil {
return fmt.Errorf("upgrading rufio crds: %v", err)
}
// We introduced the Rufio dependency prior to its initial release. Between its introduction
// and its official release breaking changes occurred to the CRDs. We're using the presence
// of the obsolete BaseboardManagement CRD to determine if there's an old Rufio installed.
// If there is, we need to convert all obsolete BaseboardManagement CRs to Machine CRs (the
// CRD that supersedes BaseboardManagement).
if hasBaseboardManagement {
if err := p.handleRufioUnreleasedCRDs(ctx, cluster); err != nil {
return fmt.Errorf("upgrading rufio crds: %v", err)
}
// Remove the unreleased Rufio CRDs from the cluster; this will also remove any residual
// resources.
err = p.providerKubectlClient.DeleteCRD(
ctx,
rufiounreleased.BaseboardManagementResourceName,
cluster.KubeconfigFile,
)
if err != nil {
return fmt.Errorf("could not delete machines crd: %v", err)
}
}
return nil
}
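// handleRufioUnreleasedCRDs converts any pre-release BaseboardManagement custom resources to
// Rufio Machine resources and updates Hardware BMCRefs to reference the new Machine kind.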
func (p *Provider) handleRufioUnreleasedCRDs(ctx context.Context, cluster *types.Cluster) error {
// Firstly, retrieve all BaseboardManagement CRs and convert them to Machine CRs.
bm, err := p.providerKubectlClient.AllBaseboardManagements(
ctx,
cluster.KubeconfigFile,
)
if err != nil {
return fmt.Errorf("retrieving baseboardmanagement resources: %v", err)
}
if len(bm) > 0 {
serialized, err := yaml.Serialize(toRufioMachines(bm)...)
if err != nil {
return fmt.Errorf("serializing machines: %v", err)
}
err = p.providerKubectlClient.ApplyKubeSpecFromBytesWithNamespace(
ctx,
cluster,
yaml.Join(serialized),
p.stackInstaller.GetNamespace(),
)
if err != nil {
return fmt.Errorf("applying machines: %v", err)
}
}
// Secondly, iterate over all Hardware CRs and update the BMCRef to point to the new Machine
// CR.
hardware, err := p.providerKubectlClient.AllTinkerbellHardware(ctx, cluster.KubeconfigFile)
if err != nil {
return fmt.Errorf("retrieving hardware resources: %v", err)
}
var updatedHardware []tinkv1alpha1.Hardware
for _, h := range hardware {
if h.Spec.BMCRef != nil {
h.Spec.BMCRef.Kind = "Machine"
updatedHardware = append(updatedHardware, h)
}
}
if len(updatedHardware) > 0 {
serialized, err := yaml.Serialize(updatedHardware...)
if err != nil {
return fmt.Errorf("serializing hardware: %v", err)
}
err = p.providerKubectlClient.ApplyKubeSpecFromBytesForce(ctx, cluster, yaml.Join(serialized))
if err != nil {
return fmt.Errorf("applying hardware: %v", err)
}
}
return nil
}
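// toRufioMachines converts unreleased BaseboardManagement resources into released Rufio Machine
// resources, carrying over object metadata and connection details.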
func toRufioMachines(items []rufiounreleased.BaseboardManagement) []rufiov1.Machine {
var machines []rufiov1.Machine
for _, item := range items {
machines = append(machines, rufiov1.Machine{
// We need to populate type meta because we apply with kubectl (leakage).
TypeMeta: metav1.TypeMeta{
Kind: "Machine",
APIVersion: rufiov1.GroupVersion.String(),
},
ObjectMeta: item.ObjectMeta,
Spec: rufiov1.MachineSpec{
Connection: rufiov1.Connection{
AuthSecretRef: item.Spec.Connection.AuthSecretRef,
Host: item.Spec.Connection.Host,
Port: item.Spec.Connection.Port,
InsecureTLS: item.Spec.Connection.InsecureTLS,
},
},
})
}
return machines
}
package tinkerbell
import (
"context"
"errors"
"fmt"
"strings"
"testing"
"github.com/golang/mock/gomock"
rufiov1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/filewriter"
filewritermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/mocks"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/rufiounreleased"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack"
stackmocks "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/yaml"
)
func TestProviderPreCoreComponentsUpgrade_NilClusterSpec(t *testing.T) {
tconfig := NewPreCoreComponentsUpgradeTestConfig(t)
provider, err := tconfig.GetProvider()
if err != nil {
t.Fatalf("Received unexpected error creating provider: %v", err)
}
err = provider.PreCoreComponentsUpgrade(
context.Background(),
tconfig.Management,
nil,
)
expect := "cluster spec is nil"
if err == nil || !strings.Contains(err.Error(), expect) {
t.Fatalf("Expected error containing '%v'; Received '%v'", expect, err)
}
}
func TestProviderPreCoreComponentsUpgrade_NilCluster(t *testing.T) {
tconfig := NewPreCoreComponentsUpgradeTestConfig(t)
provider, err := tconfig.GetProvider()
if err != nil {
t.Fatalf("Received unexpected error creating provider: %v", err)
}
err = provider.PreCoreComponentsUpgrade(
context.Background(),
nil,
tconfig.ClusterSpec,
)
if err != nil {
t.Fatalf("Received unexpected error: %v", err)
}
}
func TestProviderPreCoreComponentsUpgrade_StackUpgradeError(t *testing.T) {
tconfig := NewPreCoreComponentsUpgradeTestConfig(t)
expect := "foobar"
tconfig.Installer.EXPECT().
Upgrade(
gomock.Any(),
tconfig.ClusterSpec.VersionsBundle.Tinkerbell,
tconfig.DatacenterConfig.Spec.TinkerbellIP,
tconfig.Management.KubeconfigFile,
tconfig.DatacenterConfig.Spec.HookImagesURLPath,
).
Return(errors.New(expect))
provider, err := tconfig.GetProvider()
if err != nil {
t.Fatalf("Couldn't create the provider: %v", err)
}
err = provider.PreCoreComponentsUpgrade(context.Background(), tconfig.Management, tconfig.ClusterSpec)
if err == nil || !strings.Contains(err.Error(), expect) {
t.Fatalf("Expected error containing '%v'; Received '%v'", expect, err)
}
}
func TestProviderPreCoreComponentsUpgrade_HasBaseboardManagementCRDError(t *testing.T) {
tconfig := NewPreCoreComponentsUpgradeTestConfig(t)
tconfig.Installer.EXPECT().
Upgrade(
gomock.Any(),
tconfig.ClusterSpec.VersionsBundle.Tinkerbell,
tconfig.TinkerbellIP,
tconfig.Management.KubeconfigFile,
tconfig.DatacenterConfig.Spec.HookImagesURLPath,
).
Return(nil)
expect := "foobar"
tconfig.KubeClient.EXPECT().
HasCRD(
gomock.Any(),
rufiounreleased.BaseboardManagementResourceName,
tconfig.Management.KubeconfigFile,
).
Return(false, errors.New(expect))
provider, err := tconfig.GetProvider()
if err != nil {
t.Fatalf("Couldn't create the provider: %v", err)
}
err = provider.PreCoreComponentsUpgrade(context.Background(), tconfig.Management, tconfig.ClusterSpec)
if err == nil || !strings.Contains(err.Error(), expect) {
t.Fatalf("Expected error containing '%v'; Received '%v'", expect, err)
}
}
func TestProviderPreCoreComponentsUpgrade_NoBaseboardManagementCRD(t *testing.T) {
tconfig := NewPreCoreComponentsUpgradeTestConfig(t)
tconfig.Installer.EXPECT().
Upgrade(
gomock.Any(),
tconfig.ClusterSpec.VersionsBundle.Tinkerbell,
tconfig.TinkerbellIP,
tconfig.Management.KubeconfigFile,
tconfig.DatacenterConfig.Spec.HookImagesURLPath,
).
Return(nil)
tconfig.KubeClient.EXPECT().
HasCRD(gomock.Any(), rufiounreleased.BaseboardManagementResourceName, tconfig.Management.KubeconfigFile).
Return(false, nil)
provider, err := tconfig.GetProvider()
if err != nil {
t.Fatalf("Couldn't create the provider: %v", err)
}
err = provider.PreCoreComponentsUpgrade(context.Background(), tconfig.Management, tconfig.ClusterSpec)
if err != nil {
t.Fatalf("Received unexpected error: %v", err)
}
}
func TestProviderPreCoreComponentsUpgrade_RufioConversions(t *testing.T) {
stackNamespace := "stack-namespace"
tests := []struct {
Name string
Hardware []tinkv1.Hardware
BaseboardManagements []rufiounreleased.BaseboardManagement
ExpectMachines []rufiov1.Machine
ExpectHardware []tinkv1.Hardware
}{
{
Name: "NoBaseboardManagementsOrHardware",
},
{
Name: "SingleBaseboardManagement",
BaseboardManagements: []rufiounreleased.BaseboardManagement{
{
Spec: rufiounreleased.BaseboardManagementSpec{
Connection: rufiounreleased.Connection{
Host: "host1",
Port: 443,
AuthSecretRef: corev1.SecretReference{
Name: "name1",
Namespace: "namespace1",
},
InsecureTLS: true,
},
},
},
},
ExpectMachines: []rufiov1.Machine{
PopulateRufioV1MachineMeta(rufiov1.Machine{
Spec: rufiov1.MachineSpec{
Connection: rufiov1.Connection{
Host: "host1",
Port: 443,
AuthSecretRef: corev1.SecretReference{
Name: "name1",
Namespace: "namespace1",
},
InsecureTLS: true,
},
},
}),
},
},
{
Name: "MultiBaseboardManagement",
BaseboardManagements: []rufiounreleased.BaseboardManagement{
{
Spec: rufiounreleased.BaseboardManagementSpec{
Connection: rufiounreleased.Connection{
Host: "host1",
Port: 443,
AuthSecretRef: corev1.SecretReference{
Name: "name1",
Namespace: "namespace1",
},
InsecureTLS: true,
},
},
},
{
Spec: rufiounreleased.BaseboardManagementSpec{
Connection: rufiounreleased.Connection{
Host: "host2",
Port: 443,
AuthSecretRef: corev1.SecretReference{
Name: "name2",
Namespace: "namespace2",
},
InsecureTLS: true,
},
},
},
{
Spec: rufiounreleased.BaseboardManagementSpec{
Connection: rufiounreleased.Connection{
Host: "host3",
Port: 443,
AuthSecretRef: corev1.SecretReference{
Name: "name3",
Namespace: "namespace3",
},
InsecureTLS: true,
},
},
},
},
ExpectMachines: []rufiov1.Machine{
PopulateRufioV1MachineMeta(rufiov1.Machine{
Spec: rufiov1.MachineSpec{
Connection: rufiov1.Connection{
Host: "host1",
Port: 443,
AuthSecretRef: corev1.SecretReference{
Name: "name1",
Namespace: "namespace1",
},
InsecureTLS: true,
},
},
}),
PopulateRufioV1MachineMeta(rufiov1.Machine{
Spec: rufiov1.MachineSpec{
Connection: rufiov1.Connection{
Host: "host2",
Port: 443,
AuthSecretRef: corev1.SecretReference{
Name: "name2",
Namespace: "namespace2",
},
InsecureTLS: true,
},
},
}),
PopulateRufioV1MachineMeta(rufiov1.Machine{
Spec: rufiov1.MachineSpec{
Connection: rufiov1.Connection{
Host: "host3",
Port: 443,
AuthSecretRef: corev1.SecretReference{
Name: "name3",
Namespace: "namespace3",
},
InsecureTLS: true,
},
},
}),
},
},
{
Name: "SingleHardware",
Hardware: []tinkv1.Hardware{
{
Spec: tinkv1.HardwareSpec{
BMCRef: &v1.TypedLocalObjectReference{
Kind: "BaseboardManagement",
Name: "bm1",
},
},
},
},
ExpectHardware: []tinkv1.Hardware{
{
Spec: tinkv1.HardwareSpec{
BMCRef: &v1.TypedLocalObjectReference{
Kind: "Machine",
Name: "bm1",
},
},
},
},
},
{
Name: "MultiHardware",
Hardware: []tinkv1.Hardware{
{
Spec: tinkv1.HardwareSpec{
BMCRef: &v1.TypedLocalObjectReference{
Name: "bm1",
},
},
},
{
Spec: tinkv1.HardwareSpec{
BMCRef: &v1.TypedLocalObjectReference{
Name: "bm2",
},
},
},
{
Spec: tinkv1.HardwareSpec{
BMCRef: &v1.TypedLocalObjectReference{
Name: "bm3",
},
},
},
},
ExpectHardware: []tinkv1.Hardware{
{
Spec: tinkv1.HardwareSpec{
BMCRef: &v1.TypedLocalObjectReference{
Kind: "Machine",
Name: "bm1",
},
},
},
{
Spec: tinkv1.HardwareSpec{
BMCRef: &v1.TypedLocalObjectReference{
Kind: "Machine",
Name: "bm2",
},
},
},
{
Spec: tinkv1.HardwareSpec{
BMCRef: &v1.TypedLocalObjectReference{
Kind: "Machine",
Name: "bm3",
},
},
},
},
},
{
Name: "HardwareWithoutBMCRef",
Hardware: []tinkv1.Hardware{
{
Spec: tinkv1.HardwareSpec{},
},
{
Spec: tinkv1.HardwareSpec{},
},
{
Spec: tinkv1.HardwareSpec{},
},
},
},
{
Name: "MultiBaseboardManagementAndHardware",
BaseboardManagements: []rufiounreleased.BaseboardManagement{
{
Spec: rufiounreleased.BaseboardManagementSpec{
Connection: rufiounreleased.Connection{
Host: "host1",
Port: 443,
AuthSecretRef: corev1.SecretReference{
Name: "name1",
Namespace: "namespace1",
},
InsecureTLS: true,
},
},
},
},
ExpectMachines: []rufiov1.Machine{
PopulateRufioV1MachineMeta(rufiov1.Machine{
Spec: rufiov1.MachineSpec{
Connection: rufiov1.Connection{
Host: "host1",
Port: 443,
AuthSecretRef: corev1.SecretReference{
Name: "name1",
Namespace: "namespace1",
},
InsecureTLS: true,
},
},
}),
},
Hardware: []tinkv1.Hardware{
{
Spec: tinkv1.HardwareSpec{
BMCRef: &v1.TypedLocalObjectReference{
Kind: "BaseboardManagement",
Name: "bm1",
},
},
},
{
Spec: tinkv1.HardwareSpec{},
},
},
ExpectHardware: []tinkv1.Hardware{
{
Spec: tinkv1.HardwareSpec{
BMCRef: &v1.TypedLocalObjectReference{
Kind: "Machine",
Name: "bm1",
},
},
},
},
},
}
for _, tc := range tests {
t.Run(tc.Name, func(t *testing.T) {
tconfig := NewPreCoreComponentsUpgradeTestConfig(t)
// Configure the mocks to successfully upgrade the Tinkerbell stack using the installer
// and identify the need to convert deprecated Rufio custom resources.
tconfig.Installer.EXPECT().
Upgrade(
gomock.Any(),
tconfig.ClusterSpec.VersionsBundle.Tinkerbell,
tconfig.DatacenterConfig.Spec.TinkerbellIP,
tconfig.Management.KubeconfigFile,
tconfig.DatacenterConfig.Spec.HookImagesURLPath,
).
Return(nil)
tconfig.KubeClient.EXPECT().
HasCRD(
gomock.Any(),
rufiounreleased.BaseboardManagementResourceName,
tconfig.Management.KubeconfigFile,
).
Return(true, nil)
// We minimally expect calls out to the cluster to retrieve BaseboardManagement and
// Hardware resources.
tconfig.KubeClient.EXPECT().
AllBaseboardManagements(gomock.Any(), tconfig.Management.KubeconfigFile).
Return(tc.BaseboardManagements, nil)
tconfig.KubeClient.EXPECT().
AllTinkerbellHardware(gomock.Any(), tconfig.Management.KubeconfigFile).
Return(tc.Hardware, nil)
if len(tc.ExpectMachines) > 0 {
tconfig.Installer.EXPECT().
GetNamespace().
Return(stackNamespace)
// Serialize the expected rufiov1#Machine objects into YAML so we can use gomock
// to expect that value.
serialized, err := yaml.Serialize(tc.ExpectMachines...)
if err != nil {
t.Fatalf("Could not serialize expected machines: %v", serialized)
}
expect := yaml.Join(serialized)
tconfig.KubeClient.EXPECT().
ApplyKubeSpecFromBytesWithNamespace(
gomock.Any(),
tconfig.Management,
expect,
stackNamespace,
).
Return(nil)
}
if len(tc.ExpectHardware) > 0 {
// Serialize the expected tinkv1#Hardware objects into YAML so we can use gomock
// to expect that value.
serialized, err := yaml.Serialize(tc.ExpectHardware...)
if err != nil {
t.Fatalf("Could not serialize expected hardware: %v", err)
}
expect := yaml.Join(serialized)
tconfig.KubeClient.EXPECT().
ApplyKubeSpecFromBytesForce(gomock.Any(), tconfig.Management, expect).
Return(nil)
}
// We always attempt to delete the deprecated BaseboardManagement CRD.
tconfig.KubeClient.EXPECT().
DeleteCRD(
gomock.Any(),
rufiounreleased.BaseboardManagementResourceName,
tconfig.Management.KubeconfigFile,
).
Return(nil)
provider, err := tconfig.GetProvider()
if err != nil {
t.Fatalf("Couldn't create the provider: %v", err)
}
err = provider.PreCoreComponentsUpgrade(
context.Background(),
tconfig.Management,
tconfig.ClusterSpec,
)
if err != nil {
t.Fatalf("Received unexpected error: %v", err)
}
})
}
}
// PopulateRufioV1MachineMeta populates m's TypeMeta with the Rufio v1 Machine API version and kind.
func PopulateRufioV1MachineMeta(m rufiov1.Machine) rufiov1.Machine {
m.TypeMeta = metav1.TypeMeta{
APIVersion: "bmc.tinkerbell.org/v1alpha1",
Kind: "Machine",
}
return m
}
// PreCoreComponentsUpgradeTestConfig is a test helper that contains the necessary pieces for
// testing the PreCoreComponentsUpgrade functionality.
type PreCoreComponentsUpgradeTestConfig struct {
Ctrl *gomock.Controller
Docker *stackmocks.MockDocker
Helm *stackmocks.MockHelm
KubeClient *mocks.MockProviderKubectlClient
Installer *stackmocks.MockStackInstaller
Writer *filewritermocks.MockFileWriter
TinkerbellIP string
ClusterSpec *cluster.Spec
DatacenterConfig *v1alpha1.TinkerbellDatacenterConfig
MachineConfigs map[string]*v1alpha1.TinkerbellMachineConfig
Management *types.Cluster
}
// NewPreCoreComponentsUpgradeTestConfig creates a new PreCoreComponentsUpgradeTestConfig with
// all mocks initialized and test data available.
func NewPreCoreComponentsUpgradeTestConfig(t *testing.T) *PreCoreComponentsUpgradeTestConfig {
t.Helper()
ctrl := gomock.NewController(t)
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
cfg := &PreCoreComponentsUpgradeTestConfig{
Ctrl: ctrl,
Docker: stackmocks.NewMockDocker(ctrl),
Helm: stackmocks.NewMockHelm(ctrl),
KubeClient: mocks.NewMockProviderKubectlClient(ctrl),
Installer: stackmocks.NewMockStackInstaller(ctrl),
Writer: filewritermocks.NewMockFileWriter(ctrl),
TinkerbellIP: "1.1.1.1",
ClusterSpec: clusterSpec,
DatacenterConfig: datacenterConfig,
MachineConfigs: machineConfigs,
Management: &types.Cluster{KubeconfigFile: "kubeconfig-file"},
}
cfg.DatacenterConfig.Spec.TinkerbellIP = cfg.TinkerbellIP
return cfg
}
// GetProvider retrieves a new Tinkerbell provider instance built using the mocks initialized
// in t.
func (t *PreCoreComponentsUpgradeTestConfig) GetProvider() (*Provider, error) {
p, err := NewProvider(
t.DatacenterConfig,
t.MachineConfigs,
t.ClusterSpec.Cluster,
"",
t.Writer,
t.Docker,
t.Helm,
t.KubeClient,
testIP,
test.FakeNow,
false,
false,
)
if err != nil {
return nil, err
}
p.SetStackInstaller(t.Installer)
return p, nil
}
// WithStackUpgrade configures t's mocks so a stack upgrade successfully reaches the Rufio CRD conversion step.
func (t *PreCoreComponentsUpgradeTestConfig) WithStackUpgrade() *PreCoreComponentsUpgradeTestConfig {
t.Installer.EXPECT().
Upgrade(
gomock.Any(),
t.ClusterSpec.VersionsBundle.Tinkerbell,
t.TinkerbellIP,
t.Management.KubeconfigFile,
t.DatacenterConfig.Spec.HookImagesURLPath,
).
Return(nil)
t.KubeClient.EXPECT().
HasCRD(
gomock.Any(),
rufiounreleased.BaseboardManagementResourceName,
t.Management.KubeconfigFile,
).
Return(true, nil)
return t
}
func newTinkerbellProvider(datacenterConfig *v1alpha1.TinkerbellDatacenterConfig, machineConfigs map[string]*v1alpha1.TinkerbellMachineConfig, clusterConfig *v1alpha1.Cluster, writer filewriter.FileWriter, docker stack.Docker, helm stack.Helm, kubectl ProviderKubectlClient) *Provider {
hardwareFile := "./testdata/hardware.csv"
forceCleanup := false
provider, err := NewProvider(
datacenterConfig,
machineConfigs,
clusterConfig,
hardwareFile,
writer,
docker,
helm,
kubectl,
testIP,
test.FakeNow,
forceCleanup,
false,
)
if err != nil {
panic(err)
}
return provider
}
func TestProviderSetupAndValidateManagementProxySuccess(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_proxy.yaml"
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
ctx := context.Background()
provider := newTinkerbellProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl)
provider.stackInstaller = stackInstaller
clusterSpec.ManagementCluster = &types.Cluster{Name: "test", KubeconfigFile: "kubeconfig-file"}
clusterSpec.Cluster.Spec.ManagementCluster = v1alpha1.ManagementCluster{Name: "test-mgmt"}
clusterSpec.Cluster.Spec.ProxyConfiguration = &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.4:3128",
HttpsProxy: "1.2.3.4:3128",
}
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetProvisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetEksaCluster(ctx, clusterSpec.ManagementCluster, clusterSpec.Cluster.Spec.ManagementCluster.Name).Return(clusterSpec.Cluster, nil)
stackInstaller.EXPECT().AddNoProxyIP(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host).Return()
kubectl.EXPECT().ApplyKubeSpecFromBytesForce(ctx, clusterSpec.ManagementCluster, gomock.Any()).Return(nil)
kubectl.EXPECT().WaitForRufioMachines(ctx, clusterSpec.ManagementCluster, "5m", "Contactable", gomock.Any()).Return(nil)
err := provider.SetupAndValidateUpgradeCluster(ctx, clusterSpec.ManagementCluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("Received unexpected error: %v", err)
}
}
func TestProviderSetupAndValidateManagementProxyError(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_proxy.yaml"
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
ctx := context.Background()
provider := newTinkerbellProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl)
provider.stackInstaller = stackInstaller
clusterSpec.ManagementCluster = &types.Cluster{Name: "test", KubeconfigFile: "kubeconfig-file"}
clusterSpec.Cluster.Spec.ManagementCluster = v1alpha1.ManagementCluster{Name: "test-mgmt"}
clusterSpec.Cluster.Spec.ProxyConfiguration = &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.4:3128",
HttpsProxy: "1.2.3.4:3128",
}
kubectl.EXPECT().GetUnprovisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetProvisionedTinkerbellHardware(ctx, clusterSpec.ManagementCluster.KubeconfigFile, constants.EksaSystemNamespace).Return([]tinkv1alpha1.Hardware{}, nil)
kubectl.EXPECT().GetEksaCluster(ctx, clusterSpec.ManagementCluster, clusterSpec.Cluster.Spec.ManagementCluster.Name).Return(clusterSpec.Cluster, fmt.Errorf("error getting management cluster data"))
err := provider.SetupAndValidateUpgradeCluster(ctx, clusterSpec.ManagementCluster, clusterSpec, clusterSpec)
assertError(t, "error getting management cluster data", err)
}
| 720 |
eks-anywhere | aws | Go | package tinkerbell
import (
"fmt"
"strings"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/networkutils"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
func validateOsFamily(spec *ClusterSpec) error {
controlPlaneRef := spec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef
controlPlaneOsFamily := spec.MachineConfigs[controlPlaneRef.Name].OSFamily()
if spec.Cluster.Spec.ExternalEtcdConfiguration != nil {
etcdMachineRef := spec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef
if spec.MachineConfigs[etcdMachineRef.Name].OSFamily() != controlPlaneOsFamily {
return fmt.Errorf("etcd osFamily cannot be different from control plane osFamily")
}
}
for _, group := range spec.Cluster.Spec.WorkerNodeGroupConfigurations {
groupRef := group.MachineGroupRef
if spec.MachineConfigs[groupRef.Name].OSFamily() != controlPlaneOsFamily {
return fmt.Errorf("worker node group osFamily cannot be different from control plane osFamily")
}
}
if controlPlaneOsFamily != v1alpha1.Bottlerocket && spec.DatacenterConfig.Spec.OSImageURL == "" {
return fmt.Errorf("please use bottlerocket as osFamily for auto-importing or provide a valid osImageURL")
}
return nil
}
func validateMachineRefExists(
ref *v1alpha1.Ref,
machineConfigs map[string]*v1alpha1.TinkerbellMachineConfig,
) error {
if _, ok := machineConfigs[ref.Name]; !ok {
return fmt.Errorf("missing machine config ref: kind=%v; name=%v", ref.Kind, ref.Name)
}
return nil
}
func validateMachineConfigNamespacesMatchDatacenterConfig(
datacenterConfig *v1alpha1.TinkerbellDatacenterConfig,
machineConfigs map[string]*v1alpha1.TinkerbellMachineConfig,
) error {
for _, machineConfig := range machineConfigs {
if machineConfig.Namespace != datacenterConfig.Namespace {
return fmt.Errorf(
"TinkerbellMachineConfig's namespace must match TinkerbellDatacenterConfig's namespace: %v",
machineConfig.Name,
)
}
}
return nil
}
func validateIPUnused(client networkutils.NetClient, ip string) error {
if networkutils.IsIPInUse(client, ip) {
return fmt.Errorf("ip in use: %v", ip)
}
return nil
}
func validatePortsAvailable(client networkutils.NetClient, host string) error {
unavailablePorts := getPortsUnavailable(client, host)
if len(unavailablePorts) != 0 {
return fmt.Errorf("localhost ports [%v] are already in use, please ensure these ports are available", strings.Join(unavailablePorts, ", "))
}
return nil
}
func getPortsUnavailable(client networkutils.NetClient, host string) []string {
ports := []string{"80", "42113", "50061"}
var unavailablePorts []string
for _, port := range ports {
if networkutils.IsPortInUse(client, host, port) {
unavailablePorts = append(unavailablePorts, port)
}
}
return unavailablePorts
}
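// checkTinkerbellStackEndpoints is a hypothetical sketch (added for illustration, not part of
// the upstream file) showing how the two network checks above are typically combined before
// installing the Tinkerbell stack: the Tinkerbell IP must be unclaimed and the stack's local
// ports must be free. The function name and the "localhost" host value are assumptions.
func checkTinkerbellStackEndpoints(client networkutils.NetClient, tinkerbellIP string) error {
	// The stack's load balancer needs to claim the Tinkerbell IP, so it must not already be
	// in use on the network.
	if err := validateIPUnused(client, tinkerbellIP); err != nil {
		return err
	}
	// Ports 80, 42113 and 50061 are bound locally by the stack components.
	return validatePortsAvailable(client, "localhost")
}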
// minimumHardwareRequirement defines the minimum requirement for a hardware selector.
type minimumHardwareRequirement struct {
// MinCount is the minimum number of hardware required to satisfy the requirement
MinCount int
// Selector defines what labels should be present on Hardware to consider it eligible for
// this requirement.
Selector v1alpha1.HardwareSelector
// count is used internally by validation to sum the actual available hardware.
count int
}
// minimumHardwareRequirements is a collection of minimumHardwareRequirement instances.
// It stores requirements in a map where the key is derived from selectors. This ensures selectors
// specifying the same key-value pairs are combined.
type minimumHardwareRequirements map[string]*minimumHardwareRequirement
// Add a minimumHardwareRequirement to r.
func (r *minimumHardwareRequirements) Add(selector v1alpha1.HardwareSelector, min int) error {
name, err := selector.ToString()
if err != nil {
return err
}
(*r)[name] = &minimumHardwareRequirement{
MinCount: min,
Selector: selector,
}
return nil
}
// validateMinimumHardwareRequirements validates all requirements can be satisfied using hardware
// registered with catalogue.
func validateMinimumHardwareRequirements(requirements minimumHardwareRequirements, catalogue *hardware.Catalogue) error {
// Count all hardware that meets the selector requirements for each requirement.
// This does not consider whether or not a piece of hardware is selectable by multiple
// selectors. That requires a different validation ideally run before this one.
for _, h := range catalogue.AllHardware() {
for _, r := range requirements {
if hardware.LabelsMatchSelector(r.Selector, h.Labels) {
r.count++
}
}
}
// Validate counts of hardware meet the minimum required count.
for name, r := range requirements {
if r.count < r.MinCount {
return fmt.Errorf(
"minimum hardware count not met for selector '%v': have %v, require %v",
name,
r.count,
r.MinCount,
)
}
}
return nil
}
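// exampleMinimumHardwareValidation is a hypothetical sketch (added for illustration, not present
// upstream) of how the pieces above fit together: requirements are keyed by the stringified
// selector, so repeated selectors collapse onto one entry, and the catalogue is then checked
// against them. The selector labels and counts below are made up.
func exampleMinimumHardwareValidation(catalogue *hardware.Catalogue) error {
	requirements := minimumHardwareRequirements{}
	// Require at least 1 machine labelled for the control plane.
	if err := requirements.Add(v1alpha1.HardwareSelector{"type": "cp"}, 1); err != nil {
		return err
	}
	// Require at least 2 machines labelled for the worker node group.
	if err := requirements.Add(v1alpha1.HardwareSelector{"type": "worker"}, 2); err != nil {
		return err
	}
	return validateMinimumHardwareRequirements(requirements, catalogue)
}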
// validateHardwareSatisfiesOnlyOneSelector ensures hardware in allHardware meets one and only one
// selector in selectors. selectors uses the selectorSet construct to ensure we don't
// operate on duplicate selectors given a selector can be re-used among groups as they may reference
// the same TinkerbellMachineConfig.
func validateHardwareSatisfiesOnlyOneSelector(allHardware []*tinkv1alpha1.Hardware, selectors selectorSet) error {
for _, h := range allHardware {
if matches := getMatchingHardwareSelectors(h, selectors); len(matches) > 1 {
slctrStrs, err := getHardwareSelectorsAsStrings(matches)
if err != nil {
return err
}
return fmt.Errorf(
"hardware must only satisfy 1 selector: hardware name '%v'; selectors '%v'",
h.Name,
strings.Join(slctrStrs, ", "),
)
}
}
return nil
}
// selectorSet defines a set of selectors. Selectors should be added using the Add method to ensure
// deterministic key generation. The construct is useful to avoid treating selectors that are the
// same as different.
type selectorSet map[string]v1alpha1.HardwareSelector
// Add adds selector to ss.
func (ss *selectorSet) Add(selector v1alpha1.HardwareSelector) error {
slctrStr, err := selector.ToString()
if err != nil {
return err
}
(*ss)[slctrStr] = selector
return nil
}
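// exampleSelectorOverlapCheck is a hypothetical sketch (not in the original source) showing how a
// selectorSet is built from machine config selectors and then used to assert that each Hardware
// object satisfies at most one selector.
func exampleSelectorOverlapCheck(allHardware []*tinkv1alpha1.Hardware, selectors []v1alpha1.HardwareSelector) error {
	set := selectorSet{}
	for _, s := range selectors {
		// Add dedupes selectors that stringify to the same key, e.g. when two worker groups
		// reference the same TinkerbellMachineConfig.
		if err := set.Add(s); err != nil {
			return err
		}
	}
	return validateHardwareSatisfiesOnlyOneSelector(allHardware, set)
}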
func getMatchingHardwareSelectors(
hw *tinkv1alpha1.Hardware,
selectors selectorSet,
) []v1alpha1.HardwareSelector {
var satisfies []v1alpha1.HardwareSelector
for _, selector := range selectors {
if hardware.LabelsMatchSelector(selector, hw.Labels) {
satisfies = append(satisfies, selector)
}
}
return satisfies
}
func getHardwareSelectorsAsStrings(selectors []v1alpha1.HardwareSelector) ([]string, error) {
var slctrs []string
for _, selector := range selectors {
s, err := selector.ToString()
if err != nil {
return nil, err
}
slctrs = append(slctrs, s)
}
return slctrs, nil
}
| 214 |
eks-anywhere | aws | Go | package tinkerbell
import (
"context"
"github.com/go-logr/logr"
"github.com/pkg/errors"
tinkerbellv1 "github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
capiyaml "github.com/aws/eks-anywhere/pkg/clusterapi/yaml"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
type (
// Workers represents the Tinkerbell specific CAPI spec for worker nodes.
Workers = clusterapi.Workers[*tinkerbellv1.TinkerbellMachineTemplate]
workersBuilder = capiyaml.WorkersBuilder[*tinkerbellv1.TinkerbellMachineTemplate]
)
// WorkersSpec generates a Tinkerbell-specific CAPI spec for an eks-a cluster's worker nodes.
// It talks to the cluster with a client to detect changes in immutable objects and generates new
// names for them.
func WorkersSpec(ctx context.Context, logger logr.Logger, client kubernetes.Client, spec *cluster.Spec) (*Workers, error) {
templateBuilder, err := generateTemplateBuilder(spec)
if err != nil {
return nil, errors.Wrap(err, "generating tinkerbell template builder")
}
workerTemplateNames, kubeadmTemplateNames := clusterapi.InitialTemplateNamesForWorkers(spec)
workersYaml, err := templateBuilder.GenerateCAPISpecWorkers(spec, workerTemplateNames, kubeadmTemplateNames)
if err != nil {
return nil, errors.Wrap(err, "generating tinkerbell workers yaml spec")
}
parser, builder, err := newWorkersParserAndBuilder(logger)
if err != nil {
return nil, err
}
if err = parser.Parse(workersYaml, builder); err != nil {
return nil, errors.Wrap(err, "parsing Tinkerbell CAPI workers yaml")
}
workers := builder.Workers
if err = workers.UpdateImmutableObjectNames(ctx, client, GetMachineTemplate, machineTemplateEqual); err != nil {
return nil, errors.Wrap(err, "updating Tinkerbell worker immutable object names")
}
return workers, nil
}
func newWorkersParserAndBuilder(logger logr.Logger) (*yamlutil.Parser, *workersBuilder, error) {
parser, builder, err := capiyaml.NewWorkersParserAndBuilder(
logger,
machineTemplateMapping(),
)
if err != nil {
return nil, nil, errors.Wrap(err, "building Tinkerbell workers parser and builder")
}
return parser, builder, nil
}
func machineTemplateMapping() yamlutil.Mapping[*tinkerbellv1.TinkerbellMachineTemplate] {
return yamlutil.NewMapping(
"TinkerbellMachineTemplate",
func() *tinkerbellv1.TinkerbellMachineTemplate {
return &tinkerbellv1.TinkerbellMachineTemplate{}
},
)
}
| 75 |
eks-anywhere | aws | Go | package tinkerbell_test
import (
"context"
"testing"
"time"
. "github.com/onsi/gomega"
tinkerbellv1 "github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestWorkersSpecNewCluster(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, "testdata/cluster_tinkerbell_multiple_node_groups.yaml")
client := test.NewFakeKubeClient()
workers, err := tinkerbell.WorkersSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(workers).NotTo(BeNil())
g.Expect(workers.Groups).To(HaveLen(2))
g.Expect(workers.Groups).To(ConsistOf(
clusterapi.WorkerGroup[*tinkerbellv1.TinkerbellMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(),
MachineDeployment: machineDeployment(),
ProviderMachineTemplate: machineTemplate(),
},
clusterapi.WorkerGroup[*tinkerbellv1.TinkerbellMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(
func(kct *bootstrapv1.KubeadmConfigTemplate) {
kct.Name = "test-md-1-1"
},
),
MachineDeployment: machineDeployment(
func(md *clusterv1.MachineDeployment) {
md.Name = "test-md-1"
md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1"
md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1"
md.Spec.Replicas = ptr.Int32(1)
md.Labels["pool"] = "md-1"
md.Spec.Template.ObjectMeta.Labels["pool"] = "md-1"
md.Spec.Strategy = &clusterv1.MachineDeploymentStrategy{
Type: "",
RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{
MaxUnavailable: &intstr.IntOrString{Type: 0, IntVal: 3, StrVal: ""},
MaxSurge: &intstr.IntOrString{Type: 0, IntVal: 5, StrVal: ""},
DeletePolicy: nil,
},
}
},
),
ProviderMachineTemplate: machineTemplate(
func(tmt *tinkerbellv1.TinkerbellMachineTemplate) {
tmt.Name = "test-md-1-1"
},
),
},
))
}
func TestWorkersSpecUpgradeClusterNoMachineTemplateChanges(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, "testdata/cluster_tinkerbell_multiple_node_groups.yaml")
oldGroup1 := &clusterapi.WorkerGroup[*tinkerbellv1.TinkerbellMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(),
MachineDeployment: machineDeployment(),
ProviderMachineTemplate: machineTemplate(),
}
oldGroup2 := &clusterapi.WorkerGroup[*tinkerbellv1.TinkerbellMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(
func(kct *bootstrapv1.KubeadmConfigTemplate) {
kct.Name = "test-md-1-1"
},
),
MachineDeployment: machineDeployment(
func(md *clusterv1.MachineDeployment) {
md.Name = "test-md-1"
md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1"
md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1"
md.Spec.Replicas = ptr.Int32(1)
md.Labels["pool"] = "md-1"
md.Spec.Template.ObjectMeta.Labels["pool"] = "md-1"
md.Spec.Strategy = &clusterv1.MachineDeploymentStrategy{
Type: "",
RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{
MaxUnavailable: &intstr.IntOrString{Type: 0, IntVal: 3, StrVal: ""},
MaxSurge: &intstr.IntOrString{Type: 0, IntVal: 5, StrVal: ""},
DeletePolicy: nil,
},
}
},
),
ProviderMachineTemplate: machineTemplate(
func(vmt *tinkerbellv1.TinkerbellMachineTemplate) {
vmt.Name = "test-md-1-1"
},
),
}
expectedGroup1 := oldGroup1.DeepCopy()
expectedGroup2 := oldGroup2.DeepCopy()
oldGroup1.ProviderMachineTemplate.CreationTimestamp = metav1.NewTime(time.Now())
oldGroup2.ProviderMachineTemplate.CreationTimestamp = metav1.NewTime(time.Now())
client := test.NewFakeKubeClient(
oldGroup1.MachineDeployment,
oldGroup1.KubeadmConfigTemplate,
oldGroup1.ProviderMachineTemplate,
oldGroup2.MachineDeployment,
oldGroup2.KubeadmConfigTemplate,
oldGroup2.ProviderMachineTemplate,
)
workers, err := tinkerbell.WorkersSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(workers).NotTo(BeNil())
g.Expect(workers.Groups).To(HaveLen(2))
g.Expect(workers.Groups).To(ConsistOf(*expectedGroup1, *expectedGroup2))
}
func TestWorkersSpecMachineTemplateNotFound(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, "testdata/cluster_tinkerbell_multiple_node_groups.yaml")
client := test.NewFakeKubeClient(machineDeployment())
_, err := tinkerbell.WorkersSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
}
func TestWorkersSpecErrorFromClient(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, "testdata/cluster_tinkerbell_multiple_node_groups.yaml")
client := test.NewFakeKubeClientAlwaysError()
_, err := tinkerbell.WorkersSpec(ctx, logger, client, spec)
g.Expect(err).To(HaveOccurred())
}
func TestWorkersSpecRegistryMirrorInsecureSkipVerify(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, "testdata/cluster_tinkerbell_multiple_node_groups.yaml")
client := test.NewFakeKubeClient()
tests := []struct {
name string
mirrorConfig *anywherev1.RegistryMirrorConfiguration
files []bootstrapv1.File
}{
{
name: "insecure skip verify",
mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabled(),
files: test.RegistryMirrorConfigFilesInsecureSkipVerify(),
},
{
name: "insecure skip verify with cacert",
mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabledAndCACert(),
files: test.RegistryMirrorConfigFilesInsecureSkipVerifyAndCACert(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
spec.Cluster.Spec.RegistryMirrorConfiguration = tt.mirrorConfig
workers, err := tinkerbell.WorkersSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(workers).NotTo(BeNil())
g.Expect(workers.Groups).To(HaveLen(2))
g.Expect(workers.Groups).To(ConsistOf(
clusterapi.WorkerGroup[*tinkerbellv1.TinkerbellMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) {
kct.Spec.Template.Spec.Files = append(kct.Spec.Template.Spec.Files, tt.files...)
kct.Spec.Template.Spec.PreKubeadmCommands = append(kct.Spec.Template.Spec.PreKubeadmCommands, test.RegistryMirrorSudoPreKubeadmCommands()...)
}),
MachineDeployment: machineDeployment(),
ProviderMachineTemplate: machineTemplate(),
},
clusterapi.WorkerGroup[*tinkerbellv1.TinkerbellMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(
func(kct *bootstrapv1.KubeadmConfigTemplate) {
kct.Name = "test-md-1-1"
kct.Spec.Template.Spec.Files = append(kct.Spec.Template.Spec.Files, tt.files...)
kct.Spec.Template.Spec.PreKubeadmCommands = append(kct.Spec.Template.Spec.PreKubeadmCommands, test.RegistryMirrorSudoPreKubeadmCommands()...)
},
),
MachineDeployment: machineDeployment(
func(md *clusterv1.MachineDeployment) {
md.Name = "test-md-1"
md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1"
md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1"
md.Spec.Replicas = ptr.Int32(1)
md.Labels["pool"] = "md-1"
md.Spec.Template.ObjectMeta.Labels["pool"] = "md-1"
md.Spec.Strategy = &clusterv1.MachineDeploymentStrategy{
Type: "",
RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{
MaxUnavailable: &intstr.IntOrString{Type: 0, IntVal: 3, StrVal: ""},
MaxSurge: &intstr.IntOrString{Type: 0, IntVal: 5, StrVal: ""},
DeletePolicy: nil,
},
}
},
),
ProviderMachineTemplate: machineTemplate(
func(tmt *tinkerbellv1.TinkerbellMachineTemplate) {
tmt.Name = "test-md-1-1"
},
),
},
))
})
}
}
func machineDeployment(opts ...func(*clusterv1.MachineDeployment)) *clusterv1.MachineDeployment {
o := &clusterv1.MachineDeployment{
TypeMeta: metav1.TypeMeta{
Kind: "MachineDeployment",
APIVersion: "cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-md-0",
Namespace: "eksa-system",
Labels: map[string]string{"cluster.x-k8s.io/cluster-name": "test", "pool": "md-0"},
},
Spec: clusterv1.MachineDeploymentSpec{
ClusterName: "test",
Replicas: ptr.Int32(1),
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{},
},
Template: clusterv1.MachineTemplateSpec{
ObjectMeta: clusterv1.ObjectMeta{
Labels: map[string]string{"cluster.x-k8s.io/cluster-name": "test", "pool": "md-0"},
},
Spec: clusterv1.MachineSpec{
ClusterName: "test",
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &corev1.ObjectReference{
Kind: "KubeadmConfigTemplate",
Name: "test-md-0-1",
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
},
},
InfrastructureRef: corev1.ObjectReference{
Kind: "TinkerbellMachineTemplate",
Name: "test-md-0-1",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
Version: ptr.String("v1.21.2-eks-1-21-4"),
},
},
Strategy: &clusterv1.MachineDeploymentStrategy{
Type: "",
RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{
MaxUnavailable: &intstr.IntOrString{Type: 0, IntVal: 0, StrVal: ""},
MaxSurge: &intstr.IntOrString{Type: 0, IntVal: 1, StrVal: ""},
DeletePolicy: nil,
},
},
},
}
for _, opt := range opts {
opt(o)
}
return o
}
func kubeadmConfigTemplate(opts ...func(*bootstrapv1.KubeadmConfigTemplate)) *bootstrapv1.KubeadmConfigTemplate {
o := &bootstrapv1.KubeadmConfigTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmConfigTemplate",
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-md-0-1",
Namespace: "eksa-system",
},
Spec: bootstrapv1.KubeadmConfigTemplateSpec{
Template: bootstrapv1.KubeadmConfigTemplateResource{
Spec: bootstrapv1.KubeadmConfigSpec{
JoinConfiguration: &bootstrapv1.JoinConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
Name: "",
CRISocket: "",
Taints: nil,
KubeletExtraArgs: map[string]string{
"anonymous-auth": "false",
"provider-id": "PROVIDER_ID",
"read-only-port": "0",
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
},
},
},
Users: []bootstrapv1.User{
{
Name: "tink-user",
Sudo: ptr.String("ALL=(ALL) NOPASSWD:ALL"),
SSHAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== [email protected]"},
},
},
Format: bootstrapv1.Format("cloud-config"),
},
},
},
}
for _, opt := range opts {
opt(o)
}
return o
}
func machineTemplate(opts ...func(*tinkerbellv1.TinkerbellMachineTemplate)) *tinkerbellv1.TinkerbellMachineTemplate {
o := &tinkerbellv1.TinkerbellMachineTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellMachineTemplate",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-md-0-1",
Namespace: "eksa-system",
},
Spec: tinkerbellv1.TinkerbellMachineTemplateSpec{
Template: tinkerbellv1.TinkerbellMachineTemplateResource{
Spec: tinkerbellv1.TinkerbellMachineSpec{
TemplateOverride: "global_timeout: 6000\nid: \"\"\nname: tink-test\ntasks:\n- actions:\n - environment:\n COMPRESSED: \"true\"\n DEST_DISK: /dev/sda\n IMG_URL: \"\"\n image: image2disk:v1.0.0\n name: stream-image\n timeout: 360\n - environment:\n BLOCK_DEVICE: /dev/sda2\n CHROOT: \"y\"\n CMD_LINE: apt -y update && apt -y install openssl\n DEFAULT_INTERPRETER: /bin/sh -c\n FS_TYPE: ext4\n image: cexec:v1.0.0\n name: install-openssl\n timeout: 90\n - environment:\n CONTENTS: |\n network:\n version: 2\n renderer: networkd\n ethernets:\n eno1:\n dhcp4: true\n eno2:\n dhcp4: true\n eno3:\n dhcp4: true\n eno4:\n dhcp4: true\n DEST_DISK: /dev/sda2\n DEST_PATH: /etc/netplan/config.yaml\n DIRMODE: \"0755\"\n FS_TYPE: ext4\n GID: \"0\"\n MODE: \"0644\"\n UID: \"0\"\n image: writefile:v1.0.0\n name: write-netplan\n timeout: 90\n - environment:\n CONTENTS: |\n datasource:\n Ec2:\n metadata_urls: []\n strict_id: false\n system_info:\n default_user:\n name: tink\n groups: [wheel, adm]\n sudo: [\"ALL=(ALL) NOPASSWD:ALL\"]\n shell: /bin/bash\n manage_etc_hosts: localhost\n warnings:\n dsid_missing_source: off\n DEST_DISK: /dev/sda2\n DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg\n DIRMODE: \"0700\"\n FS_TYPE: ext4\n GID: \"0\"\n MODE: \"0600\"\n image: writefile:v1.0.0\n name: add-tink-cloud-init-config\n timeout: 90\n - environment:\n CONTENTS: |\n datasource: Ec2\n DEST_DISK: /dev/sda2\n DEST_PATH: /etc/cloud/ds-identify.cfg\n DIRMODE: \"0700\"\n FS_TYPE: ext4\n GID: \"0\"\n MODE: \"0600\"\n UID: \"0\"\n image: writefile:v1.0.0\n name: add-tink-cloud-init-ds-config\n timeout: 90\n - environment:\n BLOCK_DEVICE: /dev/sda2\n FS_TYPE: ext4\n image: kexec:v1.0.0\n name: kexec-image\n pid: host\n timeout: 90\n name: tink-test\n volumes:\n - /dev:/dev\n - /dev/console:/dev/console\n - /lib/firmware:/lib/firmware:ro\n worker: '{{.device_1}}'\nversion: \"0.1\"\n",
HardwareAffinity: &tinkerbellv1.HardwareAffinity{
Required: []tinkerbellv1.HardwareAffinityTerm{
{
LabelSelector: metav1.LabelSelector{MatchLabels: map[string]string{"type": "worker"}},
},
},
},
},
},
},
}
for _, opt := range opts {
opt(o)
}
return o
}
| 367 |
eks-anywhere | aws | Go | package hardware
import (
"bufio"
"errors"
"fmt"
"io"
"os"
rufiov1alpha1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
yamlutil "k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
eksav1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/templater"
)
// Indexer provides indexing behavior for objects.
type Indexer interface {
// Lookup retrieves objects associated with the index => value pair.
Lookup(index, value string) ([]interface{}, error)
// Insert inserts v into the index.
Insert(v interface{}) error
// IndexField associates index with fn such that Lookup may be used to retrieve objects.
IndexField(index string, fn KeyExtractorFunc)
// Remove deletes v from the index.
Remove(v interface{}) error
}
// Catalogue represents a catalogue of Tinkerbell hardware manifests to be used with Tinkerbell's
// Kubefied back-end.
type Catalogue struct {
hardware []*tinkv1alpha1.Hardware
hardwareIndex Indexer
bmcs []*rufiov1alpha1.Machine
bmcIndex Indexer
secrets []*corev1.Secret
secretIndex Indexer
}
// CatalogueOption defines an option to be applied in Catalogue instantiation.
type CatalogueOption func(*Catalogue)
// NewCatalogue creates a new Catalogue instance.
func NewCatalogue(opts ...CatalogueOption) *Catalogue {
catalogue := &Catalogue{
hardwareIndex: NewFieldIndexer(&tinkv1alpha1.Hardware{}),
bmcIndex: NewFieldIndexer(&rufiov1alpha1.Machine{}),
secretIndex: NewFieldIndexer(&corev1.Secret{}),
}
for _, opt := range opts {
opt(catalogue)
}
return catalogue
}
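// exampleCatalogueLookup is a hypothetical sketch (added for illustration, not part of the
// original file) of the intended flow: construct a Catalogue with an index option, insert
// hardware, then look it up by the indexed field. The MAC address used as the instance ID
// below is made up.
func exampleCatalogueLookup() ([]*tinkv1alpha1.Hardware, error) {
	catalogue := NewCatalogue(WithHardwareIDIndex())
	hw := &tinkv1alpha1.Hardware{
		Spec: tinkv1alpha1.HardwareSpec{
			Metadata: &tinkv1alpha1.HardwareMetadata{
				Instance: &tinkv1alpha1.MetadataInstance{ID: "aa:bb:cc:dd:ee:ff"},
			},
		},
	}
	if err := catalogue.InsertHardware(hw); err != nil {
		return nil, err
	}
	// Lookup returns every catalogued Hardware whose .Spec.Metadata.Instance.ID matches.
	return catalogue.LookupHardware(HardwareIDIndex, "aa:bb:cc:dd:ee:ff")
}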
// ParseYAMLCatalogueFromFile parses filename, a YAML document, using ParseYAMLCatalogue.
func ParseYAMLCatalogueFromFile(catalogue *Catalogue, filename string) error {
fh, err := os.Open(filename)
if err != nil {
return err
}
defer fh.Close()
return ParseYAMLCatalogue(catalogue, fh)
}
// ParseYAMLCatalogue parses a YAML document, r, that represents a set of Kubernetes manifests.
// Manifests parsed include Tinkerbell Hardware, Rufio Machines (BMCs) and associated Core API Secrets.
func ParseYAMLCatalogue(catalogue *Catalogue, r io.Reader) error {
document := yamlutil.NewYAMLReader(bufio.NewReader(r))
for {
manifest, err := document.Read()
if errors.Is(err, io.EOF) {
return nil
}
if err != nil {
return err
}
var resource unstructured.Unstructured
if err = yaml.Unmarshal(manifest, &resource); err != nil {
return err
}
switch resource.GetKind() {
case "Hardware":
if err := catalogueSerializedHardware(catalogue, manifest); err != nil {
return err
}
case "Machine":
if err := catalogueSerializedBMC(catalogue, manifest); err != nil {
return err
}
case "Secret":
if err := catalogueSerializedSecret(catalogue, manifest); err != nil {
return err
}
}
}
}
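// exampleParseCatalogue is a hypothetical sketch (not in the original file) showing how
// ParseYAMLCatalogue is typically paired with index options so that parsed Hardware and
// Machine manifests can be looked up afterwards.
func exampleParseCatalogue(r io.Reader) (*Catalogue, error) {
	catalogue := NewCatalogue(WithHardwareIDIndex(), WithBMCNameIndex())
	if err := ParseYAMLCatalogue(catalogue, r); err != nil {
		return nil, err
	}
	return catalogue, nil
}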
func catalogueSerializedHardware(catalogue *Catalogue, manifest []byte) error {
var hardware tinkv1alpha1.Hardware
if err := yaml.UnmarshalStrict(manifest, &hardware); err != nil {
return fmt.Errorf("unable to parse hardware manifest: %v", err)
}
if err := catalogue.InsertHardware(&hardware); err != nil {
return err
}
return nil
}
func catalogueSerializedBMC(catalogue *Catalogue, manifest []byte) error {
var bmc rufiov1alpha1.Machine
if err := yaml.UnmarshalStrict(manifest, &bmc); err != nil {
return fmt.Errorf("unable to parse bmc manifest: %v", err)
}
if err := catalogue.InsertBMC(&bmc); err != nil {
return err
}
return nil
}
func catalogueSerializedSecret(catalogue *Catalogue, manifest []byte) error {
var secret corev1.Secret
if err := yaml.UnmarshalStrict(manifest, &secret); err != nil {
return fmt.Errorf("unable to parse secret manifest: %v", err)
}
if err := catalogue.InsertSecret(&secret); err != nil {
return err
}
return nil
}
// MarshalCatalogue marshals c into YAML that can be submitted to a Kubernetes cluster.
func MarshalCatalogue(c *Catalogue) ([]byte, error) {
var marshallables []eksav1alpha1.Marshallable
for _, hw := range c.AllHardware() {
marshallables = append(marshallables, hw)
}
for _, bmc := range c.AllBMCs() {
marshallables = append(marshallables, bmc)
}
for _, secret := range c.AllSecrets() {
marshallables = append(marshallables, secret)
}
resources := make([][]byte, 0, len(marshallables))
for _, marshallable := range marshallables {
resource, err := yaml.Marshal(marshallable)
if err != nil {
return nil, fmt.Errorf("failed marshalling resource for hardware spec: %v", err)
}
resources = append(resources, resource)
}
return templater.AppendYamlResources(resources...), nil
}
// NewMachineCatalogueWriter creates a MachineWriter instance that writes Machine instances to the
// catalogue, including their Hardware, BMC (Machine) and Secret data.
func NewMachineCatalogueWriter(catalogue *Catalogue) MachineWriter {
return MultiMachineWriter(
NewHardwareCatalogueWriter(catalogue),
NewBMCCatalogueWriter(catalogue),
NewSecretCatalogueWriter(catalogue),
)
}
| 174 |
eks-anywhere | aws | Go | package hardware
import (
"github.com/tinkerbell/rufio/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/constants"
)
// IndexBMCs indexes BMC instances on index by extracting the key using fn.
func (c *Catalogue) IndexBMCs(index string, fn KeyExtractorFunc) {
c.bmcIndex.IndexField(index, fn)
}
// InsertBMC inserts BMCs into the catalogue. If any indexes exist, the BMC is indexed.
func (c *Catalogue) InsertBMC(bmc *v1alpha1.Machine) error {
if err := c.bmcIndex.Insert(bmc); err != nil {
return err
}
c.bmcs = append(c.bmcs, bmc)
return nil
}
// AllBMCs retrieves a copy of the catalogued BMC instances.
func (c *Catalogue) AllBMCs() []*v1alpha1.Machine {
bmcs := make([]*v1alpha1.Machine, len(c.bmcs))
copy(bmcs, c.bmcs)
return bmcs
}
// LookupBMC retrieves BMC instances on index with a key of key. Multiple BMCs _may_
// have the same key hence it can return multiple BMCs.
func (c *Catalogue) LookupBMC(index, key string) ([]*v1alpha1.Machine, error) {
untyped, err := c.bmcIndex.Lookup(index, key)
if err != nil {
return nil, err
}
bmcs := make([]*v1alpha1.Machine, len(untyped))
for i, v := range untyped {
bmcs[i] = v.(*v1alpha1.Machine)
}
return bmcs, nil
}
// TotalBMCs returns the total BMCs registered in the catalogue.
func (c *Catalogue) TotalBMCs() int {
return len(c.bmcs)
}
const BMCNameIndex = ".ObjectMeta.Name"
// WithBMCNameIndex creates a BMC index using BMCNameIndex on .ObjectMeta.Name.
func WithBMCNameIndex() CatalogueOption {
return func(c *Catalogue) {
c.IndexBMCs(BMCNameIndex, func(o interface{}) string {
bmc := o.(*v1alpha1.Machine)
return bmc.ObjectMeta.Name
})
}
}
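// exampleBMCLookupByName is a hypothetical sketch (added for illustration) of how the name index
// above is consumed: catalogued Machines are retrieved by their .ObjectMeta.Name and the caller
// handles the zero-or-many result.
func exampleBMCLookupByName(c *Catalogue, name string) (*v1alpha1.Machine, error) {
	machines, err := c.LookupBMC(BMCNameIndex, name)
	if err != nil || len(machines) == 0 {
		return nil, err
	}
	// BMC names are expected to be unique, so take the first match.
	return machines[0], nil
}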
// BMCCatalogueWriter converts Machine instances to Tinkerbell Machine and inserts them
// in a catalogue.
type BMCCatalogueWriter struct {
catalogue *Catalogue
}
var _ MachineWriter = &BMCCatalogueWriter{}
// NewBMCCatalogueWriter creates a new BMCCatalogueWriter instance.
func NewBMCCatalogueWriter(catalogue *Catalogue) *BMCCatalogueWriter {
return &BMCCatalogueWriter{catalogue: catalogue}
}
// Write converts m to a Tinkerbell Machine and inserts it into w's Catalogue.
func (w *BMCCatalogueWriter) Write(m Machine) error {
if m.HasBMC() {
return w.catalogue.InsertBMC(toRufioMachine(m))
}
return nil
}
func toRufioMachine(m Machine) *v1alpha1.Machine {
// TODO(chrisdoherty4)
// - Set the namespace to the CAPT namespace.
// - Patch through insecure TLS.
return &v1alpha1.Machine{
TypeMeta: newMachineTypeMeta(),
ObjectMeta: v1.ObjectMeta{
Name: formatBMCRef(m),
Namespace: constants.EksaSystemNamespace,
},
Spec: v1alpha1.MachineSpec{
Connection: v1alpha1.Connection{
Host: m.BMCIPAddress,
AuthSecretRef: corev1.SecretReference{
Name: formatBMCSecretRef(m),
Namespace: constants.EksaSystemNamespace,
},
InsecureTLS: true,
},
},
}
}
| 108 |
eks-anywhere | aws | Go | package hardware_test
import (
"testing"
"github.com/onsi/gomega"
"github.com/tinkerbell/rufio/api/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
func TestCatalogue_BMC_Insert(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
err := catalogue.InsertBMC(&v1alpha1.Machine{})
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.TotalBMCs()).To(gomega.Equal(1))
}
func TestCatalogue_BMC_UnknownIndexErrors(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
_, err := catalogue.LookupBMC(hardware.BMCNameIndex, "Name")
g.Expect(err).To(gomega.HaveOccurred())
}
func TestCatalogue_BMC_Indexed(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue(hardware.WithBMCNameIndex())
const name = "hello"
expect := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{Name: name}}
err := catalogue.InsertBMC(expect)
g.Expect(err).ToNot(gomega.HaveOccurred())
received, err := catalogue.LookupBMC(hardware.BMCNameIndex, name)
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(received).To(gomega.HaveLen(1))
g.Expect(received[0]).To(gomega.Equal(expect))
}
func TestCatalogue_BMC_AllBMCsReceivesCopy(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue(hardware.WithHardwareIDIndex())
const totalHardware = 1
err := catalogue.InsertBMC(&v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "foo"}})
g.Expect(err).ToNot(gomega.HaveOccurred())
changedHardware := catalogue.AllBMCs()
g.Expect(changedHardware).To(gomega.HaveLen(totalHardware))
changedHardware[0] = &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "qux"}}
unchangedHardware := catalogue.AllBMCs()
g.Expect(unchangedHardware).ToNot(gomega.Equal(changedHardware))
}
func TestBMCCatalogueWriter_Write(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
writer := hardware.NewBMCCatalogueWriter(catalogue)
machine := NewValidMachine()
err := writer.Write(machine)
g.Expect(err).To(gomega.Succeed())
bmcs := catalogue.AllBMCs()
g.Expect(bmcs).To(gomega.HaveLen(1))
g.Expect(bmcs[0].Name).To(gomega.ContainSubstring(machine.Hostname))
g.Expect(bmcs[0].Spec.Connection.Host).To(gomega.Equal(machine.BMCIPAddress))
g.Expect(bmcs[0].Spec.Connection.AuthSecretRef.Name).To(gomega.ContainSubstring(machine.Hostname))
}
| 82 |
eks-anywhere | aws | Go | package hardware
import (
"fmt"
"math"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
eksav1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
)
// serializeHardwareSelector returns a key, unique to the selector, for use in a map.
func serializeHardwareSelector(selector eksav1alpha1.HardwareSelector) (string, error) {
return selector.ToString()
}
// ErrDiskNotFound indicates a disk was not found for a given selector.
type ErrDiskNotFound struct {
// A unique identifier for the selector, preferably something useful to an end-user.
SelectorID string
}
func (e ErrDiskNotFound) Error() string {
return fmt.Sprintf("no disk found for hardware selector %v", e.SelectorID)
}
func (ErrDiskNotFound) Is(t error) bool {
_, ok := t.(ErrDiskNotFound)
return ok
}
// IndexHardware indexes Hardware instances on index by extracting the key using fn.
func (c *Catalogue) IndexHardware(index string, fn KeyExtractorFunc) {
c.hardwareIndex.IndexField(index, fn)
}
// InsertHardware inserts Hardware into the catalogue. If any indexes exist, the hardware is
// indexed.
func (c *Catalogue) InsertHardware(hardware *tinkv1alpha1.Hardware) error {
if err := c.hardwareIndex.Insert(hardware); err != nil {
return err
}
c.hardware = append(c.hardware, hardware)
return nil
}
// RemoveHardwares removes a slice of hardwares from the catalogue.
func (c *Catalogue) RemoveHardwares(hardware []tinkv1alpha1.Hardware) error {
m := make(map[string]bool, len(hardware))
for _, hw := range hardware {
m[getRemoveKey(hw)] = true
}
diff := []*tinkv1alpha1.Hardware{}
for i, hw := range c.hardware {
key := getRemoveKey(*hw)
if _, ok := m[key]; !ok {
diff = append(diff, c.hardware[i])
} else {
if err := c.hardwareIndex.Remove(c.hardware[i]); err != nil {
return err
}
}
}
c.hardware = diff
return nil
}
// getRemoveKey returns the key used to search for and remove hardware.
func getRemoveKey(hardware tinkv1alpha1.Hardware) string {
return hardware.Name + ":" + hardware.Namespace
}
// AllHardware retrieves a copy of the catalogued Hardware instances.
func (c *Catalogue) AllHardware() []*tinkv1alpha1.Hardware {
hardware := make([]*tinkv1alpha1.Hardware, len(c.hardware))
copy(hardware, c.hardware)
return hardware
}
// LookupHardware retrieves Hardware instances on index with a key of key. Multiple hardware _may_
// have the same key hence it can return multiple Hardware.
func (c *Catalogue) LookupHardware(index, key string) ([]*tinkv1alpha1.Hardware, error) {
untyped, err := c.hardwareIndex.Lookup(index, key)
if err != nil {
return nil, err
}
hardware := make([]*tinkv1alpha1.Hardware, len(untyped))
for i, v := range untyped {
hardware[i] = v.(*tinkv1alpha1.Hardware)
}
return hardware, nil
}
// TotalHardware returns the total hardware registered in the catalogue.
func (c *Catalogue) TotalHardware() int {
return len(c.hardware)
}
const HardwareIDIndex = ".Spec.Metadata.Instance.ID"
// WithHardwareIDIndex creates a Hardware index using HardwareIDIndex on .Spec.Metadata.Instance.ID
// values.
func WithHardwareIDIndex() CatalogueOption {
return func(c *Catalogue) {
c.IndexHardware(HardwareIDIndex, func(o interface{}) string {
hardware := o.(*tinkv1alpha1.Hardware)
return hardware.Spec.Metadata.Instance.ID
})
}
}
const HardwareBMCRefIndex = ".Spec.BmcRef"
// WithHardwareBMCRefIndex creates a Hardware index using HardwareBMCRefIndex on .Spec.BmcRef.
func WithHardwareBMCRefIndex() CatalogueOption {
return func(c *Catalogue) {
c.IndexHardware(HardwareBMCRefIndex, func(o interface{}) string {
hardware := o.(*tinkv1alpha1.Hardware)
return hardware.Spec.BMCRef.String()
})
}
}
// HardwareCatalogueWriter converts Machine instances to Tinkerbell Hardware and inserts them
// in a catalogue.
type HardwareCatalogueWriter struct {
catalogue *Catalogue
}
var _ MachineWriter = &HardwareCatalogueWriter{}
// NewHardwareCatalogueWriter creates a new HardwareCatalogueWriter instance.
func NewHardwareCatalogueWriter(catalogue *Catalogue) *HardwareCatalogueWriter {
return &HardwareCatalogueWriter{catalogue: catalogue}
}
// Write converts m to a Tinkerbell Hardware and inserts it into w's Catalogue.
func (w *HardwareCatalogueWriter) Write(m Machine) error {
return w.catalogue.InsertHardware(hardwareFromMachine(m))
}
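// exampleWriteMachineToCatalogue is a hypothetical sketch (not part of the original file) of the
// writer flow: a CSV-sourced Machine is converted to a Tinkerbell Hardware object via
// hardwareFromMachine and registered in the catalogue, after which it is available through
// AllHardware and any configured indexes.
func exampleWriteMachineToCatalogue(catalogue *Catalogue, m Machine) (int, error) {
	writer := NewHardwareCatalogueWriter(catalogue)
	if err := writer.Write(m); err != nil {
		return 0, err
	}
	return catalogue.TotalHardware(), nil
}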
func hardwareFromMachine(m Machine) *tinkv1alpha1.Hardware {
// allow is necessary to allocate memory so we can get a bool pointer required by
// the hardware.
allow := true
// TODO(chrisdoherty4) Set the namespace to the CAPT namespace.
return &tinkv1alpha1.Hardware{
TypeMeta: newHardwareTypeMeta(),
ObjectMeta: v1.ObjectMeta{
Name: m.Hostname,
Namespace: constants.EksaSystemNamespace,
Labels: m.Labels,
},
Spec: tinkv1alpha1.HardwareSpec{
BMCRef: newBMCRefFromMachine(m),
Disks: []tinkv1alpha1.Disk{{Device: m.Disk}},
Metadata: &tinkv1alpha1.HardwareMetadata{
Facility: &tinkv1alpha1.MetadataFacility{
FacilityCode: "onprem",
PlanSlug: "c2.medium.x86",
},
Instance: &tinkv1alpha1.MetadataInstance{
ID: m.MACAddress,
Hostname: m.Hostname,
Ips: []*tinkv1alpha1.MetadataInstanceIP{
{
Address: m.IPAddress,
Netmask: m.Netmask,
Gateway: m.Gateway,
Family: 4,
Public: true,
},
},
// TODO(chrisdoherty4) Fix upstream. The OperatingSystem is used in boots to
// detect what iPXE scripts should be served. The Kubernetes back-end nilifies
// its response to retrieving the OS data and the handling code doesn't check
// for nil resulting in a segfault.
//
// Upstream needs patching but this will suffice for now.
OperatingSystem: &tinkv1alpha1.MetadataInstanceOperatingSystem{},
AllowPxe: true,
AlwaysPxe: true,
},
},
Interfaces: []tinkv1alpha1.Interface{
{
Netboot: &tinkv1alpha1.Netboot{
AllowPXE: &allow,
AllowWorkflow: &allow,
},
DHCP: &tinkv1alpha1.DHCP{
Arch: "x86_64",
MAC: m.MACAddress,
IP: &tinkv1alpha1.IP{
Address: m.IPAddress,
Netmask: m.Netmask,
Gateway: m.Gateway,
Family: 4,
},
// set LeaseTime to the max value so it effectively hands out max duration leases (~136 years)
// This value gets ignored for Ubuntu because we set static IPs for it
// It's only temporarily needed for Bottlerocket until Bottlerocket supports static IPs
LeaseTime: int64(math.Pow(2, 32) - 2),
Hostname: m.Hostname,
NameServers: m.Nameservers,
UEFI: true,
VLANID: m.VLANID,
},
},
},
},
}
}
// newBMCRefFromMachine returns a BMCRef pointer for Hardware.
func newBMCRefFromMachine(m Machine) *corev1.TypedLocalObjectReference {
if m.HasBMC() {
return &corev1.TypedLocalObjectReference{
Name: formatBMCRef(m),
Kind: tinkerbellBMCKind,
}
}
return nil
}
| 234 |
eks-anywhere | aws | Go | package hardware_test
import (
"testing"
"github.com/onsi/gomega"
"github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
func TestCatalogue_Hardware_Insert(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
err := catalogue.InsertHardware(&v1alpha1.Hardware{})
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.TotalHardware()).To(gomega.Equal(1))
}
func TestCatalogue_Hardwares_Remove(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
err := catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Name: "hw1",
Namespace: "namespace",
},
})
g.Expect(err).ToNot(gomega.HaveOccurred())
err = catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Name: "hw2",
Namespace: "namespace",
},
})
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.RemoveHardwares([]v1alpha1.Hardware{
{
ObjectMeta: v1.ObjectMeta{
Name: "hw2",
Namespace: "namespace",
},
},
})).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.TotalHardware()).To(gomega.Equal(1))
}
func TestCatalogue_Hardwares_RemoveDuplicates(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
err := catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Name: "hw1",
Namespace: "namespace",
},
})
g.Expect(err).ToNot(gomega.HaveOccurred())
err = catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Name: "hw2",
Namespace: "namespace",
},
})
g.Expect(err).ToNot(gomega.HaveOccurred())
err = catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Name: "hw2",
Namespace: "namespace",
},
})
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.RemoveHardwares([]v1alpha1.Hardware{
{
ObjectMeta: v1.ObjectMeta{
Name: "hw2",
Namespace: "namespace",
},
},
})).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.TotalHardware()).To(gomega.Equal(1))
}
func TestCatalogue_Hardwares_RemoveExtraHw(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
err := catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Name: "hw1",
Namespace: "namespace",
},
})
g.Expect(err).ToNot(gomega.HaveOccurred())
err = catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Name: "hw2",
Namespace: "namespace",
},
})
g.Expect(err).ToNot(gomega.HaveOccurred())
err = catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Name: "hw3",
Namespace: "namespace",
},
})
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.RemoveHardwares([]v1alpha1.Hardware{
{
ObjectMeta: v1.ObjectMeta{
Name: "hw2",
Namespace: "namespace",
},
},
{
ObjectMeta: v1.ObjectMeta{
Name: "hw3",
Namespace: "namespace",
},
},
})).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.TotalHardware()).To(gomega.Equal(1))
}
func TestCatalogue_Hardwares_RemoveNothing(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
err := catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Name: "hw1",
Namespace: "namespace",
},
})
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.RemoveHardwares([]v1alpha1.Hardware{
{
ObjectMeta: v1.ObjectMeta{
Name: "hw2",
Namespace: "namespace",
},
},
})).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.TotalHardware()).To(gomega.Equal(1))
}
func TestCatalogue_Hardwares_RemoveEverything(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
err := catalogue.InsertHardware(&v1alpha1.Hardware{
ObjectMeta: v1.ObjectMeta{
Name: "hw1",
Namespace: "namespace",
},
})
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.RemoveHardwares([]v1alpha1.Hardware{
{
ObjectMeta: v1.ObjectMeta{
Name: "hw1",
Namespace: "namespace",
},
},
})).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.TotalHardware()).To(gomega.Equal(0))
}
func TestCatalogue_Hardware_UnknownIndexErrors(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
_, err := catalogue.LookupHardware(hardware.HardwareIDIndex, "ID")
g.Expect(err).To(gomega.HaveOccurred())
}
func TestCatalogue_Hardware_IDIndex(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue(hardware.WithHardwareIDIndex())
const id = "hello"
expect := &v1alpha1.Hardware{
Spec: v1alpha1.HardwareSpec{
Metadata: &v1alpha1.HardwareMetadata{
Instance: &v1alpha1.MetadataInstance{
ID: id,
},
},
},
}
err := catalogue.InsertHardware(expect)
g.Expect(err).ToNot(gomega.HaveOccurred())
received, err := catalogue.LookupHardware(hardware.HardwareIDIndex, id)
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(received).To(gomega.HaveLen(1))
g.Expect(received[0]).To(gomega.Equal(expect))
}
func TestCatalogue_Hardware_BmcRefIndex(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue(hardware.WithHardwareBMCRefIndex())
group := "foobar"
ref := &corev1.TypedLocalObjectReference{
APIGroup: &group,
Kind: "bazqux",
Name: "secret",
}
expect := &v1alpha1.Hardware{Spec: v1alpha1.HardwareSpec{BMCRef: ref}}
err := catalogue.InsertHardware(expect)
g.Expect(err).ToNot(gomega.HaveOccurred())
received, err := catalogue.LookupHardware(hardware.HardwareBMCRefIndex, ref.String())
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(received).To(gomega.HaveLen(1))
g.Expect(received[0]).To(gomega.Equal(expect))
}
func TestCatalogue_Hardware_AllHardwareReceivesCopy(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue(hardware.WithHardwareIDIndex())
const totalHardware = 1
hw := &v1alpha1.Hardware{
Spec: v1alpha1.HardwareSpec{
Metadata: &v1alpha1.HardwareMetadata{
Instance: &v1alpha1.MetadataInstance{
ID: "foo",
},
},
},
}
err := catalogue.InsertHardware(hw)
g.Expect(err).ToNot(gomega.HaveOccurred())
changedHardware := catalogue.AllHardware()
g.Expect(changedHardware).To(gomega.HaveLen(totalHardware))
changedHardware[0] = &v1alpha1.Hardware{
Spec: v1alpha1.HardwareSpec{
Metadata: &v1alpha1.HardwareMetadata{
Instance: &v1alpha1.MetadataInstance{
ID: "qux",
},
},
},
}
unchangedHardware := catalogue.AllHardware()
g.Expect(unchangedHardware).ToNot(gomega.Equal(changedHardware))
}
func TestHardwareCatalogueWriter_Write(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
writer := hardware.NewHardwareCatalogueWriter(catalogue)
machine := NewValidMachine()
err := writer.Write(machine)
g.Expect(err).To(gomega.Succeed())
hardware := catalogue.AllHardware()
g.Expect(hardware).To(gomega.HaveLen(1))
g.Expect(hardware[0].Name).To(gomega.Equal(machine.Hostname))
}
| 283 |
eks-anywhere | aws | Go | package hardware
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
"github.com/aws/eks-anywhere/pkg/constants"
)
// IndexSecret indexes Secret instances on index by extracting the key using fn.
func (c *Catalogue) IndexSecret(index string, fn KeyExtractorFunc) {
c.secretIndex.IndexField(index, fn)
}
// InsertSecret inserts Secrets into the catalogue. If any indexes exist, the Secret is indexed.
func (c *Catalogue) InsertSecret(secret *corev1.Secret) error {
if err := c.secretIndex.Insert(secret); err != nil {
return err
}
c.secrets = append(c.secrets, secret)
return nil
}
// AllSecrets retrieves a copy of the catalogued Secret instances.
func (c *Catalogue) AllSecrets() []*corev1.Secret {
secrets := make([]*corev1.Secret, len(c.secrets))
copy(secrets, c.secrets)
return secrets
}
// LookupSecret retrieves Secret instances on index with a key of key. Multiple Secrets _may_
// have the same key hence it can return multiple Secrets.
func (c *Catalogue) LookupSecret(index, key string) ([]*corev1.Secret, error) {
untyped, err := c.secretIndex.Lookup(index, key)
if err != nil {
return nil, err
}
secrets := make([]*corev1.Secret, len(untyped))
for i, v := range untyped {
secrets[i] = v.(*corev1.Secret)
}
return secrets, nil
}
// TotalSecrets returns the total Secrets registered in the catalogue.
func (c *Catalogue) TotalSecrets() int {
return len(c.secrets)
}
const SecretNameIndex = ".ObjectMeta.Name"
// WithSecretNameIndex creates a Secret index using SecretNameIndex on Secret.ObjectMeta.Name.
func WithSecretNameIndex() CatalogueOption {
return func(c *Catalogue) {
c.IndexSecret(SecretNameIndex, func(o interface{}) string {
secret := o.(*corev1.Secret)
return secret.ObjectMeta.Name
})
}
}
// SecretCatalogueWriter converts Machine instances to Kubernetes Secrets containing their BMC
// credentials and inserts them in a catalogue.
type SecretCatalogueWriter struct {
catalogue *Catalogue
}
var _ MachineWriter = &SecretCatalogueWriter{}
// NewSecretCatalogueWriter creates a new SecretCatalogueWriter instance.
func NewSecretCatalogueWriter(catalogue *Catalogue) *SecretCatalogueWriter {
return &SecretCatalogueWriter{catalogue: catalogue}
}
// Write converts m to a Kubernetes Secret holding its BMC credentials and inserts it into w's
// Catalogue. Machines without a BMC configuration are skipped.
func (w *SecretCatalogueWriter) Write(m Machine) error {
if m.HasBMC() {
return w.catalogue.InsertSecret(baseboardManagementSecretFromMachine(m))
}
return nil
}
func baseboardManagementSecretFromMachine(m Machine) *corev1.Secret {
return &corev1.Secret{
TypeMeta: newSecretTypeMeta(),
ObjectMeta: v1.ObjectMeta{
Name: formatBMCSecretRef(m),
Namespace: constants.EksaSystemNamespace,
Labels: map[string]string{
v1alpha3.ClusterctlMoveLabel: "true",
},
},
Type: "kubernetes.io/basic-auth",
Data: map[string][]byte{
"username": []byte(m.BMCUsername),
"password": []byte(m.BMCPassword),
},
}
}
| 103 |
eks-anywhere | aws | Go | package hardware_test
import (
"testing"
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
func TestCatalogue_Secret_Insert(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
err := catalogue.InsertSecret(&corev1.Secret{})
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.TotalSecrets()).To(gomega.Equal(1))
}
func TestCatalogue_Secret_UnknownIndexErrors(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
_, err := catalogue.LookupSecret(hardware.SecretNameIndex, "Name")
g.Expect(err).To(gomega.HaveOccurred())
}
func TestCatalogue_Secret_Indexed(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue(hardware.WithSecretNameIndex())
const name = "hello"
expect := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: name}}
err := catalogue.InsertSecret(expect)
g.Expect(err).ToNot(gomega.HaveOccurred())
received, err := catalogue.LookupSecret(hardware.SecretNameIndex, name)
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(received).To(gomega.HaveLen(1))
g.Expect(received[0]).To(gomega.Equal(expect))
}
func TestCatalogue_Secret_AllSecretsReceivesCopy(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue(hardware.WithHardwareIDIndex())
const totalHardware = 1
err := catalogue.InsertSecret(&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "foo"}})
g.Expect(err).ToNot(gomega.HaveOccurred())
changedHardware := catalogue.AllSecrets()
g.Expect(changedHardware).To(gomega.HaveLen(totalHardware))
changedHardware[0] = &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "qux"}}
unchangedHardware := catalogue.AllSecrets()
g.Expect(unchangedHardware).ToNot(gomega.Equal(changedHardware))
}
func TestSecretCatalogueWriter_Write(t *testing.T) {
g := gomega.NewWithT(t)
catalogue := hardware.NewCatalogue()
writer := hardware.NewSecretCatalogueWriter(catalogue)
machine := NewValidMachine()
err := writer.Write(machine)
g.Expect(err).To(gomega.Succeed())
secrets := catalogue.AllSecrets()
g.Expect(secrets).To(gomega.HaveLen(1))
g.Expect(secrets[0].Name).To(gomega.ContainSubstring(machine.Hostname))
g.Expect(secrets[0].Data).To(gomega.HaveKeyWithValue("username", []byte(machine.BMCUsername)))
g.Expect(secrets[0].Data).To(gomega.HaveKeyWithValue("password", []byte(machine.BMCPassword)))
}
| 82 |
eks-anywhere | aws | Go | package hardware_test
import (
"bufio"
"bytes"
"testing"
"github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
const hardwareManifestsYAML = `
apiVersion: tinkerbell.org/v1alpha1
kind: Hardware
metadata:
labels:
clusterctl.cluster.x-k8s.io/move: "true"
name: worker1
namespace: eksa-system
spec:
metadata:
instance:
id: "foo"
status: {}
---
apiVersion: tinkerbell.org/v1alpha1
kind: Machine
metadata:
labels:
clusterctl.cluster.x-k8s.io/move: "true"
name: bmc-worker1
namespace: eksa-system
spec:
connection:
authSecretRef:
name: bmc-worker1-auth
namespace: eksa-system
host: 192.168.0.10
status: {}
---
apiVersion: v1
data:
password: QWRtaW4=
username: YWRtaW4=
kind: Secret
metadata:
labels:
clusterctl.cluster.x-k8s.io/move: "true"
name: bmc-worker1-auth
namespace: eksa-system
type: kubernetes.io/basic-auth
`
func TestParseYAMLCatalogueWithData(t *testing.T) {
g := gomega.NewWithT(t)
buffer := bufio.NewReader(bytes.NewBufferString(hardwareManifestsYAML))
catalogue := hardware.NewCatalogue()
err := hardware.ParseYAMLCatalogue(catalogue, buffer)
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.TotalHardware()).To(gomega.Equal(1))
g.Expect(catalogue.TotalBMCs()).To(gomega.Equal(1))
g.Expect(catalogue.TotalSecrets()).To(gomega.Equal(1))
}
func TestParseYAMLCatalogueWithoutData(t *testing.T) {
g := gomega.NewWithT(t)
var buf bytes.Buffer
buffer := bufio.NewReader(&buf)
catalogue := hardware.NewCatalogue()
err := hardware.ParseYAMLCatalogue(catalogue, buffer)
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(catalogue.TotalHardware()).To(gomega.Equal(0))
g.Expect(catalogue.TotalBMCs()).To(gomega.Equal(0))
g.Expect(catalogue.TotalSecrets()).To(gomega.Equal(0))
}
| 83 |
eks-anywhere | aws | Go | package hardware
import (
"bufio"
"bytes"
stdcsv "encoding/csv"
"fmt"
"io"
"os"
"strings"
csv "github.com/gocarina/gocsv"
unstructuredutil "github.com/aws/eks-anywhere/pkg/utils/unstructured"
)
// CSVReader reads a CSV file and provides Machine instances. It satisfies the MachineReader interface.
type CSVReader struct {
reader *csv.Unmarshaller
}
// NewCSVReader returns a new CSVReader instance that consumes csv data from r. r should return io.EOF when no more
// records are available.
func NewCSVReader(r io.Reader) (CSVReader, error) {
stdreader := stdcsv.NewReader(r)
reader, err := csv.NewUnmarshaller(stdreader, Machine{})
if err != nil {
return CSVReader{}, err
}
if err := ensureRequiredColumnsInCSV(reader.MismatchedStructFields); err != nil {
return CSVReader{}, err
}
return CSVReader{reader: reader}, nil
}
// Read reads a single entry from the CSV data source and returns a new Machine representation.
func (cr CSVReader) Read() (Machine, error) {
machine, err := cr.reader.Read()
if err != nil {
return Machine{}, err
}
return machine.(Machine), nil
}
// NewNormalizedCSVReaderFromFile creates a MachineReader instance backed by a CSVReader reading from path
// that applies default normalizations to machines.
func NewNormalizedCSVReaderFromFile(path string) (MachineReader, error) {
fh, err := os.Open(path)
if err != nil {
return CSVReader{}, err
}
reader, err := NewCSVReader(bufio.NewReader(fh))
if err != nil {
return nil, err
}
return NewNormalizer(reader), nil
}
// requiredColumns matches the csv tags on the Machine struct. These must remain in sync with
// the struct. In the future we may consider an alternative that uses reflection to determine
// whether a field is required.
var requiredColumns = map[string]struct{}{
"hostname": {},
"ip_address": {},
"netmask": {},
"gateway": {},
"nameservers": {},
"mac": {},
"disk": {},
"labels": {},
}
func ensureRequiredColumnsInCSV(unmatched []string) error {
var intersection []string
for _, column := range unmatched {
if _, ok := requiredColumns[column]; ok {
intersection = append(intersection, column)
}
}
if len(intersection) > 0 {
return fmt.Errorf("missing required columns in csv: %v", strings.Join(intersection, ", "))
}
return nil
}
// BuildHardwareYAML builds a hardware yaml from the csv at the provided path.
func BuildHardwareYAML(path string) ([]byte, error) {
reader, err := NewNormalizedCSVReaderFromFile(path)
if err != nil {
return nil, fmt.Errorf("reading csv: %v", err)
}
var b bytes.Buffer
writer := NewTinkerbellManifestYAML(&b)
validator := NewDefaultMachineValidator()
err = TranslateAll(reader, writer, validator)
if err != nil {
return nil, fmt.Errorf("generating hardware yaml: %v", err)
}
return unstructuredutil.StripNull(b.Bytes())
}
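// writeHardwareYAMLExample is an illustrative sketch (not part of the original source) showing how
// BuildHardwareYAML is typically wired into a CLI flow. The csvPath and outPath arguments are
// hypothetical file locations; the CSV is expected to carry the required columns listed above
// (hostname, ip_address, netmask, gateway, nameservers, mac, disk, labels).
func writeHardwareYAMLExample(csvPath, outPath string) error {
yamlBytes, err := BuildHardwareYAML(csvPath)
if err != nil {
return fmt.Errorf("building hardware manifests: %v", err)
}
// The generated manifests can be written to disk or applied to the cluster.
return os.WriteFile(outPath, yamlBytes, 0o644)
}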
| 113 |
eks-anywhere | aws | Go | package hardware_test
import (
"bytes"
stdcsv "encoding/csv"
"errors"
"fmt"
"strings"
"testing"
"testing/iotest"
csv "github.com/gocarina/gocsv"
"github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
func TestCSVReaderReads(t *testing.T) {
g := gomega.NewWithT(t)
buf := NewBufferedCSV()
expect := NewValidMachine()
err := csv.MarshalCSV([]hardware.Machine{expect}, buf)
g.Expect(err).ToNot(gomega.HaveOccurred())
reader, err := hardware.NewCSVReader(buf.Buffer)
g.Expect(err).ToNot(gomega.HaveOccurred())
machine, err := reader.Read()
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(machine).To(gomega.BeEquivalentTo(expect))
}
func TestCSVReaderWithMultipleLabels(t *testing.T) {
g := gomega.NewWithT(t)
buf := NewBufferedCSV()
expect := NewValidMachine()
expect.Labels["foo"] = "bar"
expect.Labels["qux"] = "baz"
err := csv.MarshalCSV([]hardware.Machine{expect}, buf)
g.Expect(err).ToNot(gomega.HaveOccurred())
reader, err := hardware.NewCSVReader(buf.Buffer)
g.Expect(err).ToNot(gomega.HaveOccurred())
machine, err := reader.Read()
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(machine).To(gomega.BeEquivalentTo(expect))
}
func TestCSVReaderFromFile(t *testing.T) {
g := gomega.NewWithT(t)
reader, err := hardware.NewNormalizedCSVReaderFromFile("./testdata/hardware.csv")
g.Expect(err).ToNot(gomega.HaveOccurred())
machine, err := reader.Read()
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(machine).To(gomega.Equal(
hardware.Machine{
Labels: map[string]string{"type": "cp"},
Nameservers: []string{"1.1.1.1"},
Gateway: "10.10.10.1",
Netmask: "255.255.255.0",
IPAddress: "10.10.10.10",
MACAddress: "00:00:00:00:00:01",
Hostname: "worker1",
Disk: "/dev/sda",
BMCIPAddress: "192.168.0.10",
BMCUsername: "Admin",
BMCPassword: "admin",
},
))
}
func TestNewCSVReaderWithIOReaderError(t *testing.T) {
g := gomega.NewWithT(t)
expect := errors.New("read err")
_, err := hardware.NewCSVReader(iotest.ErrReader(expect))
g.Expect(err).To(gomega.HaveOccurred())
g.Expect(err.Error()).To(gomega.ContainSubstring(expect.Error()))
}
func TestCSVReaderWithoutBMCHeaders(t *testing.T) {
g := gomega.NewWithT(t)
reader, err := hardware.NewNormalizedCSVReaderFromFile("./testdata/hardware_no_bmc_headers.csv")
g.Expect(err).ToNot(gomega.HaveOccurred())
machine, err := reader.Read()
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(machine).To(gomega.Equal(
hardware.Machine{
Labels: map[string]string{"type": "cp"},
Nameservers: []string{"1.1.1.1"},
Gateway: "10.10.10.1",
Netmask: "255.255.255.0",
IPAddress: "10.10.10.10",
MACAddress: "00:00:00:00:00:01",
Hostname: "worker1",
Disk: "/dev/sda",
BMCIPAddress: "",
BMCUsername: "",
BMCPassword: "",
},
))
}
func TestCSVReaderWithMissingRequiredColumns(t *testing.T) {
allHeaders := []string{
"hostname",
"ip_address",
"netmask",
"gateway",
"nameservers",
"mac",
"disk",
"labels",
}
for i, missing := range allHeaders {
t.Run(fmt.Sprintf("Missing_%v", missing), func(t *testing.T) {
// Create the set of included headers based on the current iteration.
included := make([]string, len(allHeaders))
copy(included, allHeaders)
included = append(included[0:i], included[i+1:]...)
// Create a buffer containing the included headers so the CSV reader can pull them.
buf := bytes.NewBufferString(fmt.Sprintf("%v", strings.Join(included, ",")))
g := gomega.NewWithT(t)
_, err := hardware.NewCSVReader(buf)
g.Expect(err).To(gomega.HaveOccurred())
g.Expect(err.Error()).To(gomega.ContainSubstring(missing))
})
}
}
func TestCSVBuildHardwareYamlFromCSV(t *testing.T) {
g := gomega.NewWithT(t)
hardwareYaml, err := hardware.BuildHardwareYAML("./testdata/hardware.csv")
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(hardwareYaml).To(gomega.Equal([]byte(`apiVersion: tinkerbell.org/v1alpha1
kind: Hardware
metadata:
labels:
type: cp
name: worker1
namespace: eksa-system
spec:
bmcRef:
kind: Machine
name: bmc-worker1
disks:
- device: /dev/sda
interfaces:
- dhcp:
arch: x86_64
hostname: worker1
ip:
address: 10.10.10.10
family: 4
gateway: 10.10.10.1
netmask: 255.255.255.0
lease_time: 4294967294
mac: "00:00:00:00:00:01"
name_servers:
- 1.1.1.1
uefi: true
netboot:
allowPXE: true
allowWorkflow: true
metadata:
facility:
facility_code: onprem
plan_slug: c2.medium.x86
instance:
allow_pxe: true
always_pxe: true
hostname: worker1
id: "00:00:00:00:00:01"
ips:
- address: 10.10.10.10
family: 4
gateway: 10.10.10.1
netmask: 255.255.255.0
public: true
operating_system: {}
status: {}
---
apiVersion: bmc.tinkerbell.org/v1alpha1
kind: Machine
metadata:
name: bmc-worker1
namespace: eksa-system
spec:
connection:
authSecretRef:
name: bmc-worker1-auth
namespace: eksa-system
host: 192.168.0.10
insecureTLS: true
port: 0
status: {}
---
apiVersion: v1
data:
password: YWRtaW4=
username: QWRtaW4=
kind: Secret
metadata:
labels:
clusterctl.cluster.x-k8s.io/move: "true"
name: bmc-worker1-auth
namespace: eksa-system
type: kubernetes.io/basic-auth`)))
}
// BufferedCSV is an in-memory CSV that satisfies io.Reader and io.Writer.
type BufferedCSV struct {
*bytes.Buffer
*stdcsv.Writer
*stdcsv.Reader
}
func NewBufferedCSV() *BufferedCSV {
buf := &BufferedCSV{Buffer: &bytes.Buffer{}}
buf.Writer = stdcsv.NewWriter(buf.Buffer)
buf.Reader = stdcsv.NewReader(buf.Buffer)
return buf
}
// Write writes record to b using the underlying csv.Writer but immediately flushes. This
// ensures the in-memory buffer is always up-to-date.
func (b *BufferedCSV) Write(record []string) error {
if err := b.Writer.Write(record); err != nil {
return err
}
b.Flush()
return nil
}
| 251 |
eks-anywhere | aws | Go | package hardware
import "fmt"
func formatBMCRef(m Machine) string {
return fmt.Sprintf("bmc-%s", m.Hostname)
}
func formatBMCSecretRef(m Machine) string {
return fmt.Sprintf("%s-auth", formatBMCRef(m))
}
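// bmcNamingExample is an illustrative sketch (not part of the original source) showing the naming
// convention derived from a machine's hostname: a machine named "worker1" yields a BMC object
// named "bmc-worker1" and an auth secret named "bmc-worker1-auth".
func bmcNamingExample() (string, string) {
m := Machine{Hostname: "worker1"}
return formatBMCRef(m), formatBMCSecretRef(m)
}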
| 12 |
eks-anywhere | aws | Go | package hardware
import (
"fmt"
"reflect"
)
// FieldIndexer indexes collection of objects for a single type against one of its fields.
// FieldIndexer is not thread safe.
type FieldIndexer struct {
expectedType reflect.Type
indexes map[string]*fieldIndex
}
// NewFieldIndexer creates a new FieldIndexer instance. object is the object to be indexed and will
// be checked during Insert() calls. NewFieldIndexer will panic if object is nil.
func NewFieldIndexer(object interface{}) *FieldIndexer {
objectType := reflect.TypeOf(object)
if objectType == nil {
panic("object cannot be nil")
}
return &FieldIndexer{
expectedType: objectType,
indexes: make(map[string]*fieldIndex),
}
}
// KeyExtractorFunc returns a key from object that can be used to look up the object.
type KeyExtractorFunc func(object interface{}) string
// IndexField registers a new index with i. field is the index name and should represent a path
// to the field such as `.Spec.ID`. fn is used to extract the lookup key on Insert() from the object
// to be inserted.
func (i *FieldIndexer) IndexField(field string, fn KeyExtractorFunc) {
i.indexes[field] = &fieldIndex{
index: make(map[string][]interface{}),
keyExtractorFunc: fn,
}
}
// Insert inserts v into i on all indexed fields registered with IndexField. If v is not of the
// expected type defined by NewFieldIndexer() ErrIncorrectType is returned. Multiple objects
// with the same index value may be inserted.
func (i *FieldIndexer) Insert(v interface{}) error {
objectType := reflect.TypeOf(v)
if objectType != i.expectedType {
return ErrIncorrectType{Expected: i.expectedType, Received: objectType}
}
for _, idx := range i.indexes {
idx.Insert(v)
}
return nil
}
// Lookup uses the index associated with field to find and return all objects associated with key.
// If field has no associated index created by IndexField ErrUnknownIndex is returned.
func (i *FieldIndexer) Lookup(field string, key string) ([]interface{}, error) {
idx, ok := i.indexes[field]
if !ok {
return nil, ErrUnknownIndex{Field: field}
}
return idx.Lookup(key), nil
}
// Remove removes v from all indexes if present. If v is not present Remove is a no-op. If v is of
// an incorrect type ErrUnknownType is returned.
func (i *FieldIndexer) Remove(v interface{}) error {
objectType := reflect.TypeOf(v)
if objectType != i.expectedType {
return ErrIncorrectType{Expected: i.expectedType, Received: objectType}
}
for _, idx := range i.indexes {
idx.Remove(v)
}
return nil
}
// fieldIndex represents a single index on a particular object. When inserting into the fieldIndex
// the key is extracted from the object using the KeyExtractorFunc.
type fieldIndex struct {
index map[string][]interface{}
keyExtractorFunc KeyExtractorFunc
}
func (i *fieldIndex) Insert(v interface{}) {
key := i.keyExtractorFunc(v)
i.index[key] = append(i.index[key], v)
}
func (i *fieldIndex) Lookup(key string) []interface{} {
return i.index[key]
}
func (i *fieldIndex) Remove(v interface{}) {
key := i.keyExtractorFunc(v)
delete(i.index, key)
}
// ErrIncorrectType indicates an incorrect type was used with a FieldIndexer.
type ErrIncorrectType struct {
Expected reflect.Type
Received reflect.Type
}
func (e ErrIncorrectType) Error() string {
return fmt.Sprintf("expected type '%s', received object of type '%v'", e.Expected, e.Received)
}
type ErrUnknownIndex struct {
Field string
}
func (e ErrUnknownIndex) Error() string {
return fmt.Sprintf("unknown index: %v", e.Field)
}
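// fieldIndexerExample is an illustrative sketch (not part of the original source) showing the
// intended register-insert-lookup flow. The object type and the ".Name" index key are hypothetical.
func fieldIndexerExample() ([]interface{}, error) {
type object struct{ Name string }
indexer := NewFieldIndexer(&object{})
indexer.IndexField(".Name", func(o interface{}) string {
return o.(*object).Name
})
// Insert rejects values whose concrete type differs from the one passed to NewFieldIndexer.
if err := indexer.Insert(&object{Name: "worker1"}); err != nil {
return nil, err
}
// Lookup returns every object whose extracted key matches "worker1".
return indexer.Lookup(".Name", "worker1")
}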
| 121 |
eks-anywhere | aws | Go | package hardware_test
import (
"testing"
"github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
func TestFieldIndexer_InsertAndLookup(t *testing.T) {
g := gomega.NewWithT(t)
type Object struct{ Name string }
const Index = ".Name"
indexer := hardware.NewFieldIndexer(&Object{})
indexer.IndexField(Index, func(o interface{}) string {
object := o.(*Object)
return object.Name
})
objects, err := indexer.Lookup(Index, "hello")
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(objects).To(gomega.BeEmpty())
const name = "hello world"
expect := &Object{Name: name}
err = indexer.Insert(expect)
g.Expect(err).ToNot(gomega.HaveOccurred())
objects, err = indexer.Lookup(Index, name)
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(objects).To(gomega.HaveLen(1))
g.Expect(objects[0]).To(gomega.Equal(expect))
}
func TestFieldIndexer_InsertIncorrectType(t *testing.T) {
g := gomega.NewWithT(t)
type Object struct{ Name string }
const Index = ".Name"
indexer := hardware.NewFieldIndexer(&Object{})
indexer.IndexField(Index, func(o interface{}) string {
object := o.(*Object)
return object.Name
})
type IncorrectObject struct{}
err := indexer.Insert(IncorrectObject{})
g.Expect(err).To(gomega.HaveOccurred())
g.Expect(err).To(gomega.BeAssignableToTypeOf(hardware.ErrIncorrectType{}))
}
func TestFieldIndexer_NilObjectTypePanics(t *testing.T) {
g := gomega.NewWithT(t)
g.Expect(func() {
hardware.NewFieldIndexer(nil)
}).To(gomega.Panic())
}
func TestFieldIndexer_NilInterfacePanics(t *testing.T) {
g := gomega.NewWithT(t)
g.Expect(func() {
var i interface{}
hardware.NewFieldIndexer(i)
}).To(gomega.Panic())
}
func TestFieldIndexer_LookupUnknownIndexPanics(t *testing.T) {
g := gomega.NewWithT(t)
type Object struct{ Name string }
indexer := hardware.NewFieldIndexer(&Object{})
_, err := indexer.Lookup("unknown index", "key")
g.Expect(err).To(gomega.HaveOccurred())
}
func TestFieldIndexer_RemoveValue(t *testing.T) {
g := gomega.NewWithT(t)
type Object struct{ Name string }
const Index = ".Name"
indexer := hardware.NewFieldIndexer(&Object{})
indexer.IndexField(Index, func(o interface{}) string {
object := o.(*Object)
return object.Name
})
const name = "hello world"
o := &Object{Name: name}
err := indexer.Insert(o)
g.Expect(err).ToNot(gomega.HaveOccurred())
objects, err := indexer.Lookup(Index, name)
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(objects).To(gomega.HaveLen(1))
err = indexer.Remove(o)
g.Expect(err).ToNot(gomega.HaveOccurred())
objects, err = indexer.Lookup(Index, name)
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(objects).To(gomega.BeEmpty())
}
func TestFieldIndexer_RemoveIncorrectTypeIsNoop(t *testing.T) {
g := gomega.NewWithT(t)
type Object struct{ Name string }
const Index = ".Name"
indexer := hardware.NewFieldIndexer(&Object{})
indexer.IndexField(Index, func(o interface{}) string {
object := o.(*Object)
return object.Name
})
objects, err := indexer.Lookup(Index, "hello")
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(objects).To(gomega.BeEmpty())
err = indexer.Remove("hello")
g.Expect(err).To(gomega.HaveOccurred())
objects, err = indexer.Lookup(Index, "hello")
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(objects).To(gomega.BeEmpty())
}
func TestFieldIndexer_RemoveUnknownValueIsNoop(t *testing.T) {
g := gomega.NewWithT(t)
type Object struct{ Name string }
const Index = ".Name"
indexer := hardware.NewFieldIndexer(&Object{})
indexer.IndexField(Index, func(o interface{}) string {
object := o.(*Object)
return object.Name
})
objects, err := indexer.Lookup(Index, "hello")
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(objects).To(gomega.BeEmpty())
o := &Object{Name: "i am unknown"}
err = indexer.Remove(o)
g.Expect(err).ToNot(gomega.HaveOccurred())
objects, err = indexer.Lookup(Index, "hello")
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(objects).To(gomega.BeEmpty())
}
| 153 |
eks-anywhere | aws | Go | package hardware
import (
"context"
"fmt"
rufiov1alpha1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/constants"
)
// OwnerNameLabel is the label set by CAPT to mark hardware as belonging to a cluster.
const OwnerNameLabel string = "v1alpha1.tinkerbell.org/ownerName"
// KubeReader reads the tinkerbell hardware objects from the cluster.
// It holds the objects in a catalogue.
type KubeReader struct {
client client.Client
catalogue *Catalogue
}
// NewKubeReader returns a new KubeReader instance.
// It creates a new Catalogue for each KubeReader instance.
func NewKubeReader(client client.Client) *KubeReader {
return &KubeReader{
client: client,
catalogue: NewCatalogue(
WithHardwareIDIndex(),
WithHardwareBMCRefIndex(),
WithBMCNameIndex(),
WithSecretNameIndex(),
),
}
}
// LoadHardware fetches the unprovisioned Tinkerbell hardware objects and inserts them into the KubeReader catalogue.
func (kr *KubeReader) LoadHardware(ctx context.Context) error {
hwList, err := kr.getUnprovisionedTinkerbellHardware(ctx)
if err != nil {
return fmt.Errorf("failed to build catalogue: %v", err)
}
for i := range hwList {
if err := kr.catalogue.InsertHardware(&hwList[i]); err != nil {
return err
}
}
return nil
}
// GetCatalogue returns the KubeReader catalogue.
func (kr *KubeReader) GetCatalogue() *Catalogue {
return kr.catalogue
}
// getUnprovisionedTinkerbellHardware fetches the tinkerbell hardware objects on the cluster which do not have an ownerName label.
func (kr *KubeReader) getUnprovisionedTinkerbellHardware(ctx context.Context) ([]tinkv1alpha1.Hardware, error) {
var selectedHardware tinkv1alpha1.HardwareList
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: OwnerNameLabel,
Operator: metav1.LabelSelectorOpDoesNotExist,
},
},
})
if err != nil {
return nil, fmt.Errorf("converting label selector: %w", err)
}
if err := kr.client.List(ctx, &selectedHardware, &client.ListOptions{LabelSelector: selector}, client.InNamespace(constants.EksaSystemNamespace)); err != nil {
return nil, fmt.Errorf("listing hardware without owner: %v", err)
}
return selectedHardware.Items, nil
}
// LoadRufioMachines fetches rufio machine objects from the cluster and inserts them into the KubeReader catalogue.
func (kr *KubeReader) LoadRufioMachines(ctx context.Context) error {
var rufioMachines rufiov1alpha1.MachineList
if err := kr.client.List(ctx, &rufioMachines, &client.ListOptions{Namespace: constants.EksaSystemNamespace}); err != nil {
return fmt.Errorf("listing rufio machines: %v", err)
}
for i := range rufioMachines.Items {
if err := kr.catalogue.InsertBMC(&rufioMachines.Items[i]); err != nil {
return err
}
}
return nil
}
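// kubeReaderExample is an illustrative sketch (not part of the original source) showing how a
// KubeReader is typically used to build a catalogue from a management cluster: unprovisioned
// Hardware objects and rufio Machines are loaded, then the populated catalogue is returned.
func kubeReaderExample(ctx context.Context, c client.Client) (*Catalogue, error) {
reader := NewKubeReader(c)
if err := reader.LoadHardware(ctx); err != nil {
return nil, err
}
if err := reader.LoadRufioMachines(ctx); err != nil {
return nil, err
}
return reader.GetCatalogue(), nil
}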
| 97 |
eks-anywhere | aws | Go | package hardware_test
import (
"context"
"testing"
. "github.com/onsi/gomega"
rufiov1alpha1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
func TestLoadHardwareSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
hw := tinkv1alpha1.Hardware{
ObjectMeta: metav1.ObjectMeta{
Name: "hw1",
Namespace: constants.EksaSystemNamespace,
Labels: map[string]string{
"type": "cp",
},
},
Spec: tinkv1alpha1.HardwareSpec{
Metadata: &tinkv1alpha1.HardwareMetadata{
Instance: &tinkv1alpha1.MetadataInstance{
ID: "foo",
},
},
},
}
scheme := runtime.NewScheme()
_ = tinkv1alpha1.AddToScheme(scheme)
objs := []runtime.Object{&hw}
cb := fake.NewClientBuilder()
cl := cb.WithScheme(scheme).WithRuntimeObjects(objs...).Build()
kubeReader := hardware.NewKubeReader(cl)
err := kubeReader.LoadHardware(ctx)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(len(kubeReader.GetCatalogue().AllHardware())).To(Equal(1))
}
func TestLoadHardwareNoHardware(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
hw := tinkv1alpha1.Hardware{
ObjectMeta: metav1.ObjectMeta{
Name: "hw1",
Labels: map[string]string{
hardware.OwnerNameLabel: "cluster",
"type": "cp",
},
},
Spec: tinkv1alpha1.HardwareSpec{
Metadata: &tinkv1alpha1.HardwareMetadata{
Instance: &tinkv1alpha1.MetadataInstance{
ID: "foo",
},
},
},
}
scheme := runtime.NewScheme()
_ = tinkv1alpha1.AddToScheme(scheme)
objs := []runtime.Object{&hw}
cb := fake.NewClientBuilder()
cl := cb.WithScheme(scheme).WithRuntimeObjects(objs...).Build()
kubeReader := hardware.NewKubeReader(cl)
err := kubeReader.LoadHardware(ctx)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(len(kubeReader.GetCatalogue().AllHardware())).To(Equal(0))
}
func TestLoadRufioMachinesSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
rm := rufiov1alpha1.Machine{
ObjectMeta: metav1.ObjectMeta{
Name: "bm1",
Namespace: constants.EksaSystemNamespace,
},
}
scheme := runtime.NewScheme()
_ = rufiov1alpha1.AddToScheme(scheme)
objs := []runtime.Object{&rm}
cb := fake.NewClientBuilder()
cl := cb.WithScheme(scheme).WithRuntimeObjects(objs...).Build()
kubeReader := hardware.NewKubeReader(cl)
err := kubeReader.LoadRufioMachines(ctx)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(len(kubeReader.GetCatalogue().AllBMCs())).To(Equal(1))
}
func TestLoadRufioMachinesListFail(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects().Build()
kubeReader := hardware.NewKubeReader(cl)
err := kubeReader.LoadRufioMachines(ctx)
g.Expect(err).To(HaveOccurred())
}
| 116 |
eks-anywhere | aws | Go | package hardware
import (
"fmt"
"sort"
"strings"
)
// Machine is a machine configuration with optional BMC interface configuration.
type Machine struct {
Hostname string `csv:"hostname"`
IPAddress string `csv:"ip_address"`
Netmask string `csv:"netmask"`
Gateway string `csv:"gateway"`
Nameservers Nameservers `csv:"nameservers"`
MACAddress string `csv:"mac"`
// Disk used to populate the default workflow actions.
// Currently needs to be the same for all hardware residing in the same group, where a group
// is either: control plane hardware, external etcd hardware, or the definable worker node groups.
Disk string `csv:"disk"`
// Labels to be applied to the Hardware resource.
Labels Labels `csv:"labels"`
BMCIPAddress string `csv:"bmc_ip, omitempty"`
BMCUsername string `csv:"bmc_username, omitempty"`
BMCPassword string `csv:"bmc_password, omitempty"`
VLANID string `csv:"vlan_id, omitempty"`
}
// HasBMC determines if m has a BMC configuration. A BMC configuration is present if any of the BMC fields
// contain non-empty strings.
func (m *Machine) HasBMC() bool {
return m.BMCIPAddress != "" || m.BMCUsername != "" || m.BMCPassword != ""
}
// NameserversSeparator is used to unmarshal Nameservers.
const NameserversSeparator = "|"
// Nameservers is a custom type that can unmarshal a CSV representation of nameservers.
type Nameservers []string
func (n *Nameservers) String() string {
return strings.Join(*n, NameserversSeparator)
}
// UnmarshalCSV unmarshals s, where s is a list of nameservers separated by NameserversSeparator.
func (n *Nameservers) UnmarshalCSV(s string) error {
servers := strings.Split(s, NameserversSeparator)
*n = append(*n, servers...)
return nil
}
// MarshalCSV marshals Nameservers into a string of nameservers separated by NameserversSeparator.
func (n *Nameservers) MarshalCSV() (string, error) {
return n.String(), nil
}
// LabelsSeparator is used to separate key-value label pairs.
const LabelsSeparator = "|"
// Labels defines a label set. It satisfies https://pkg.go.dev/k8s.io/apimachinery/pkg/labels#Labels.
type Labels map[string]string
// Has returns true if the label with key k is present.
func (l Labels) Has(k string) bool {
_, ok := l[k]
return ok
}
// Get returns the value for the provided label key. See https://pkg.go.dev/k8s.io/apimachinery/pkg/labels#Labels.
func (l Labels) Get(k string) string {
return l[k]
}
func (l *Labels) MarshalCSV() (string, error) {
return l.String(), nil
}
func (l *Labels) UnmarshalCSV(s string) error {
// Ensure we make the map so consumers of l don't panic on a nil map.
*l = make(Labels)
// Cater for no labels being specified.
split := strings.Split(s, LabelsSeparator)
if len(split) == 1 && split[0] == "" {
return nil
}
for _, pair := range split {
keyValue := strings.Split(strings.TrimSpace(pair), "=")
if len(keyValue) != 2 {
return fmt.Errorf("badly formatted key-value pair: %v", pair)
}
(*l)[strings.TrimSpace(keyValue[0])] = strings.TrimSpace(keyValue[1])
}
return nil
}
func (l Labels) String() string {
labels := make([]string, 0, len(l))
for key, value := range l {
labels = append(labels, fmt.Sprintf("%v=%v", key, value))
}
// Sort for determinism.
sort.StringSlice(labels).Sort()
return strings.Join(labels, LabelsSeparator)
}
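// labelsCSVExample is an illustrative sketch (not part of the original source) showing the
// pipe-separated wire format Labels uses in the CSV labels column, e.g. "type=cp|env=prod".
func labelsCSVExample() (Labels, error) {
var l Labels
// UnmarshalCSV splits on LabelsSeparator and trims whitespace around each key and value.
if err := l.UnmarshalCSV("type=cp|env=prod"); err != nil {
return nil, err
}
// String sorts the pairs for determinism, so this round-trips as "env=prod|type=cp".
_ = l.String()
return l, nil
}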
func newEmptyFieldError(s string) error {
return newMachineError(fmt.Sprintf("%v is empty", s))
}
func newMachineError(s string) error {
return fmt.Errorf("machine: %v", s)
}
| 119 |
eks-anywhere | aws | Go | package hardware
// multiWriter implements MachineWriter. It writes Machine instances to multiple writers, similar to
// how the tee unix tool writes to multiple output streams.
type multiWriter []MachineWriter
// Write writes m to every MachineWriter in t. If a MachineWriter returns an error,
// Write immediately returns the error without attempting to write to any remaining writers.
func (t multiWriter) Write(m Machine) error {
for _, writer := range t {
if err := writer.Write(m); err != nil {
return err
}
}
return nil
}
// MultiMachineWriter combines writers into a single MachineWriter instance. Passing no writers effectively creates a
// noop MachineWriter.
func MultiMachineWriter(writers ...MachineWriter) MachineWriter {
var tee multiWriter
tee = append(tee, writers...)
return tee
}
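// multiWriterExample is an illustrative sketch (not part of the original source) showing how
// hardware and secret catalogue writers are typically combined so a single Write call populates
// both the Hardware and Secret sections of one catalogue.
func multiWriterExample(catalogue *Catalogue, m Machine) error {
writer := MultiMachineWriter(
NewHardwareCatalogueWriter(catalogue),
NewSecretCatalogueWriter(catalogue),
)
// Write fans m out to each underlying writer in order, stopping at the first error.
return writer.Write(m)
}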
| 26 |
eks-anywhere | aws | Go | package hardware_test
import (
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware/mocks"
)
func TestTeeWriterWritesToAllWriters(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
writer1 := mocks.NewMockMachineWriter(ctrl)
writer2 := mocks.NewMockMachineWriter(ctrl)
expect := hardware.Machine{Hostname: "quxer"}
var machine1, machine2 hardware.Machine
writer1.EXPECT().
Write(expect).
DoAndReturn(func(m hardware.Machine) error {
machine1 = m
return nil
})
writer2.EXPECT().
Write(expect).
Do(func(m hardware.Machine) error {
machine2 = m
return nil
}).
Return((error)(nil))
tee := hardware.MultiMachineWriter(writer1, writer2)
err := tee.Write(expect)
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(machine1).To(gomega.BeEquivalentTo(expect))
g.Expect(machine2).To(gomega.BeEquivalentTo(expect))
}
func TestTeeWriterFirstWriterErrors(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
writer1 := mocks.NewMockMachineWriter(ctrl)
writer2 := mocks.NewMockMachineWriter(ctrl)
machine := hardware.Machine{Hostname: "qux-foo"}
expect := errors.New("first writer error")
writer1.EXPECT().
Write(machine).
Return(expect)
tee := hardware.MultiMachineWriter(writer1, writer2)
err := tee.Write(machine)
g.Expect(err).To(gomega.BeEquivalentTo(expect))
}
func TestTeeWriterSecondWriterErrors(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
writer1 := mocks.NewMockMachineWriter(ctrl)
writer2 := mocks.NewMockMachineWriter(ctrl)
machine := hardware.Machine{Hostname: "qux-foo"}
expect := errors.New("first writer error")
writer1.EXPECT().
Write(machine).
Return((error)(nil))
writer2.EXPECT().
Write(machine).
Return(expect)
tee := hardware.MultiMachineWriter(writer1, writer2)
err := tee.Write(machine)
g.Expect(err).To(gomega.BeEquivalentTo(expect))
}
| 94 |
eks-anywhere | aws | Go | package hardware
import "strings"
// NormalizerFunc applies a normalization transformation to the Machine.
type NormalizerFunc func(Machine) Machine
// Normalizer is a decorator for a MachineReader that applies a set of normalization funcs
// to machines.
type Normalizer struct {
reader MachineReader
normalizers []NormalizerFunc
}
// NewNormalizer creates a Normalizer instance that decorates r's Read(). A set of default
// normalization functions are pre-registered.
func NewNormalizer(r MachineReader) *Normalizer {
normalizer := NewRawNormalizer(r)
RegisterDefaultNormalizations(normalizer)
return normalizer
}
// NewRawNormalizer returns a Normalizer without any normalization functions registered; callers
// can add them with Register or RegisterDefaultNormalizations.
func NewRawNormalizer(r MachineReader) *Normalizer {
return &Normalizer{reader: r}
}
// Read reads a Machine from the decorated MachineReader, applies all normalization funcs and
// returns the machine. If the decorated MachineReader errors, it is returned.
func (n Normalizer) Read() (Machine, error) {
machine, err := n.reader.Read()
if err != nil {
return Machine{}, err
}
for _, fn := range n.normalizers {
machine = fn(machine)
}
return machine, nil
}
// Register registers fn with n so that fn is run over each machine read from the wrapped MachineReader.
func (n *Normalizer) Register(fn NormalizerFunc) {
n.normalizers = append(n.normalizers, fn)
}
// LowercaseMACAddress ensures m's MACAddress field contains only lowercase characters.
func LowercaseMACAddress(m Machine) Machine {
m.MACAddress = strings.ToLower(m.MACAddress)
return m
}
// RegisterDefaultNormalizations registers a set of default normalizations on n.
func RegisterDefaultNormalizations(n *Normalizer) {
for _, fn := range []NormalizerFunc{
LowercaseMACAddress,
} {
n.Register(fn)
}
}
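// customNormalizationExample is an illustrative sketch (not part of the original source) showing
// how an additional NormalizerFunc can be registered alongside the defaults. The hostname trimming
// shown here is a hypothetical normalization, not one shipped with the package.
func customNormalizationExample(r MachineReader) *Normalizer {
n := NewNormalizer(r)
n.Register(func(m Machine) Machine {
m.Hostname = strings.TrimSpace(m.Hostname)
return m
})
return n
}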
| 63 |
eks-anywhere | aws | Go | package hardware_test
import (
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware/mocks"
)
func TestNormalizer(t *testing.T) {
g := gomega.NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockMachineReader(ctrl)
normalizer := hardware.NewNormalizer(reader)
expect := NewValidMachine()
expect.MACAddress = "AA:BB:CC:DD:EE:FF"
reader.EXPECT().Read().Return(expect, (error)(nil))
machine, err := normalizer.Read()
g.Expect(err).ToNot(gomega.HaveOccurred())
// Re-use the expect machine instance and lower-case the MAC.
expect.MACAddress = "aa:bb:cc:dd:ee:ff"
g.Expect(machine).To(gomega.Equal(expect))
}
func TestRawNormalizer(t *testing.T) {
g := gomega.NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockMachineReader(ctrl)
normalizer := hardware.NewRawNormalizer(reader)
expect := NewValidMachine()
expect.MACAddress = "AA:BB:CC:DD:EE:FF"
reader.EXPECT().Read().Return(expect, (error)(nil))
machine, err := normalizer.Read()
g.Expect(err).ToNot(gomega.HaveOccurred())
// The raw normalizer has no normalizations registered, so the MAC address keeps its original casing.
g.Expect(machine).To(gomega.Equal(expect))
}
func TestRawNormalizerReadError(t *testing.T) {
g := gomega.NewWithT(t)
ctrl := gomock.NewController(t)
reader := mocks.NewMockMachineReader(ctrl)
normalizer := hardware.NewNormalizer(reader)
expect := errors.New("foo bar")
reader.EXPECT().Read().Return(hardware.Machine{}, expect)
_, err := normalizer.Read()
g.Expect(err).To(gomega.HaveOccurred())
}
| 66 |
eks-anywhere | aws | Go | package hardware
import (
"fmt"
"io"
)
// MachineReader reads a single Machine configuration at a time. When there are no more Machine entries
// to be read, Read() returns io.EOF.
type MachineReader interface {
Read() (Machine, error)
}
// MachineWriter writes Machine entries.
type MachineWriter interface {
Write(Machine) error
}
// MachineValidator validates an instance of Machine.
type MachineValidator interface {
Validate(Machine) error
}
// TranslateAll reads entries 1 at a time from reader and writes them to writer. When reader returns io.EOF,
// TranslateAll returns nil. Failure to return io.EOF from reader will result in an infinite loop.
func TranslateAll(reader MachineReader, writer MachineWriter, validator MachineValidator) error {
for {
err := Translate(reader, writer, validator)
if err == io.EOF {
return nil
}
if err != nil {
return err
}
}
}
// Translate reads 1 entry from reader and writes it to writer. When reader returns io.EOF Translate
// returns io.EOF to the caller.
func Translate(reader MachineReader, writer MachineWriter, validator MachineValidator) error {
machine, err := reader.Read()
if err == io.EOF {
return err
}
if err != nil {
return fmt.Errorf("read: invalid hardware: %v", err)
}
if err := validator.Validate(machine); err != nil {
return err
}
if err := writer.Write(machine); err != nil {
return fmt.Errorf("write: %v", err)
}
return nil
}
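// translateAllExample is an illustrative sketch (not part of the original source) showing the
// typical read-validate-write pipeline: machines are read from a CSV, checked with the default
// assertions, and written into a catalogue. The csvPath argument is a hypothetical file location.
func translateAllExample(csvPath string) (*Catalogue, error) {
reader, err := NewNormalizedCSVReaderFromFile(csvPath)
if err != nil {
return nil, err
}
catalogue := NewCatalogue(WithHardwareIDIndex())
writer := NewHardwareCatalogueWriter(catalogue)
// TranslateAll loops until the reader returns io.EOF, which is masked and reported as success.
if err := TranslateAll(reader, writer, NewDefaultMachineValidator()); err != nil {
return nil, err
}
return catalogue, nil
}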
| 61 |
eks-anywhere | aws | Go | package hardware_test
import (
"errors"
"io"
"testing"
"github.com/golang/mock/gomock"
"github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware/mocks"
)
func TestTranslateReadsAndWrites(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
reader := mocks.NewMockMachineReader(ctrl)
writer := mocks.NewMockMachineWriter(ctrl)
validator := mocks.NewMockMachineValidator(ctrl)
machine := hardware.Machine{
Hostname: "foot-bar",
}
var receivedMachine hardware.Machine
reader.EXPECT().Read().Return(machine, (error)(nil))
validator.EXPECT().Validate(machine).Return((error)(nil))
writer.EXPECT().
Write(machine).
Do(func(machine hardware.Machine) {
receivedMachine = machine
}).
Return((error)(nil))
err := hardware.Translate(reader, writer, validator)
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(receivedMachine).To(gomega.BeEquivalentTo(machine))
}
func TestTranslateWithReadError(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
reader := mocks.NewMockMachineReader(ctrl)
writer := mocks.NewMockMachineWriter(ctrl)
validator := mocks.NewMockMachineValidator(ctrl)
expect := errors.New("luck-number-10")
reader.EXPECT().Read().Return(hardware.Machine{}, expect)
err := hardware.Translate(reader, writer, validator)
g.Expect(err.Error()).To(gomega.ContainSubstring(expect.Error()))
}
func TestTranslateWithWriteError(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
reader := mocks.NewMockMachineReader(ctrl)
writer := mocks.NewMockMachineWriter(ctrl)
validator := mocks.NewMockMachineValidator(ctrl)
machine := hardware.Machine{Hostname: "lucky-number-10"}
expect := errors.New("luck-number-10")
reader.EXPECT().Read().Return(machine, (error)(nil))
validator.EXPECT().Validate(machine).Return((error)(nil))
writer.EXPECT().Write(machine).Return(expect)
err := hardware.Translate(reader, writer, validator)
g.Expect(err.Error()).To(gomega.ContainSubstring(expect.Error()))
}
func TestTranslateReturnsEOFWhenReaderEOFs(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
reader := mocks.NewMockMachineReader(ctrl)
writer := mocks.NewMockMachineWriter(ctrl)
validator := mocks.NewMockMachineValidator(ctrl)
reader.EXPECT().Read().Return(hardware.Machine{}, io.EOF)
err := hardware.Translate(reader, writer, validator)
g.Expect(err).To(gomega.BeEquivalentTo(io.EOF))
}
func TestTranslateWithValidationError(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
reader := mocks.NewMockMachineReader(ctrl)
writer := mocks.NewMockMachineWriter(ctrl)
validator := mocks.NewMockMachineValidator(ctrl)
expect := errors.New("validation error")
reader.EXPECT().Read().Return(hardware.Machine{}, (error)(nil))
validator.EXPECT().Validate(hardware.Machine{}).Return(expect)
err := hardware.Translate(reader, writer, validator)
g.Expect(err.Error()).To(gomega.ContainSubstring(expect.Error()))
}
func TestTranslateAllReadsAndWritesMaskingEOF(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
reader := mocks.NewMockMachineReader(ctrl)
writer := mocks.NewMockMachineWriter(ctrl)
validator := mocks.NewMockMachineValidator(ctrl)
machine := hardware.Machine{Hostname: "lucky-number-10"}
// use readCount to track how many times the Read() call has been made. On
// the second call we return io.EOF.
var readCount int
reader.EXPECT().
Read().
Times(2).
DoAndReturn(func() (hardware.Machine, error) {
if readCount == 1 {
return hardware.Machine{}, io.EOF
}
readCount++
return machine, nil
})
validator.EXPECT().Validate(machine).Return((error)(nil))
// we only expect Write() to be called once because the io.EOF shouldn't result in
// a write.
writer.EXPECT().Write(machine).Times(1).Return((error)(nil))
err := hardware.TranslateAll(reader, writer, validator)
g.Expect(err).ToNot(gomega.HaveOccurred())
}
func TestTranslateAllWithReadError(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
reader := mocks.NewMockMachineReader(ctrl)
writer := mocks.NewMockMachineWriter(ctrl)
validator := mocks.NewMockMachineValidator(ctrl)
expect := errors.New("luck-number-10")
reader.EXPECT().Read().Return(hardware.Machine{}, expect)
err := hardware.TranslateAll(reader, writer, validator)
g.Expect(err.Error()).To(gomega.ContainSubstring(expect.Error()))
}
func TestTranslateAllWithWriteError(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
reader := mocks.NewMockMachineReader(ctrl)
writer := mocks.NewMockMachineWriter(ctrl)
validator := mocks.NewMockMachineValidator(ctrl)
machine := hardware.Machine{Hostname: "lucky-number-10"}
expect := errors.New("luck-number-10")
reader.EXPECT().Read().Return(machine, (error)(nil))
validator.EXPECT().Validate(machine).Return((error)(nil))
writer.EXPECT().Write(machine).Return(expect)
err := hardware.TranslateAll(reader, writer, validator)
g.Expect(err.Error()).To(gomega.ContainSubstring(expect.Error()))
}
func TestTranslateAllWithValidationError(t *testing.T) {
ctrl := gomock.NewController(t)
g := gomega.NewWithT(t)
reader := mocks.NewMockMachineReader(ctrl)
writer := mocks.NewMockMachineWriter(ctrl)
validator := mocks.NewMockMachineValidator(ctrl)
expect := errors.New("validation error")
reader.EXPECT().Read().Return(hardware.Machine{}, (error)(nil))
validator.EXPECT().Validate(hardware.Machine{}).Return(expect)
err := hardware.TranslateAll(reader, writer, validator)
g.Expect(err.Error()).To(gomega.ContainSubstring(expect.Error()))
}
| 201 |
eks-anywhere | aws | Go | package hardware
import v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// TypeMeta constants for defining Kubernetes TypeMeta data in Kubernetes objects.
const (
// TODO(pokearu) update API version once upstream is changed.
rufioAPIVersion = "bmc.tinkerbell.org/v1alpha1"
tinkerbellAPIVersion = "tinkerbell.org/v1alpha1"
tinkerbellHardwareKind = "Hardware"
tinkerbellBMCKind = "Machine"
secretKind = "Secret"
secretAPIVersion = "v1"
)
func newHardwareTypeMeta() v1.TypeMeta {
return v1.TypeMeta{
Kind: tinkerbellHardwareKind,
APIVersion: tinkerbellAPIVersion,
}
}
func newMachineTypeMeta() v1.TypeMeta {
return v1.TypeMeta{
Kind: tinkerbellBMCKind,
APIVersion: rufioAPIVersion,
}
}
func newSecretTypeMeta() v1.TypeMeta {
return v1.TypeMeta{
Kind: secretKind,
APIVersion: secretAPIVersion,
}
}
| 37 |
eks-anywhere | aws | Go | package hardware
import (
"errors"
"fmt"
"net"
"regexp"
"strconv"
"strings"
apimachineryvalidation "k8s.io/apimachinery/pkg/util/validation"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/networkutils"
)
// MachineAssertion defines a condition that Machine must meet.
type MachineAssertion func(Machine) error
// DefaultMachineValidator validates Machine instances.
type DefaultMachineValidator struct {
assertions []MachineAssertion
}
var _ MachineValidator = &DefaultMachineValidator{}
// NewDefaultMachineValidator creates a DefaultMachineValidator instance with the default assertions registered.
func NewDefaultMachineValidator() *DefaultMachineValidator {
validator := &DefaultMachineValidator{}
RegisterDefaultAssertions(validator)
return validator
}
// Validate validates machine by passing it to all registered MachineAssertions.
func (mv *DefaultMachineValidator) Validate(machine Machine) error {
for _, fn := range mv.assertions {
if err := fn(machine); err != nil {
return err
}
}
return nil
}
// Register registers the v MachineAssertions with mv.
func (mv *DefaultMachineValidator) Register(v ...MachineAssertion) {
mv.assertions = append(mv.assertions, v...)
}
var (
linuxPathRegex = `^(/dev/[\w-]+)+$`
linuxPathValidation = regexp.MustCompile(linuxPathRegex)
)
// StaticMachineAssertions defines all static data assertions performed on a Machine.
func StaticMachineAssertions() MachineAssertion {
return func(m Machine) error {
if m.IPAddress == "" {
return newEmptyFieldError("IPAddress")
}
if err := networkutils.ValidateIP(m.IPAddress); err != nil {
return fmt.Errorf("IPAddress: %v", err)
}
if m.Gateway == "" {
return newEmptyFieldError("Gateway")
}
if err := networkutils.ValidateIP(m.Gateway); err != nil {
return fmt.Errorf("Gateway: %v", err)
}
if len(m.Nameservers) == 0 {
return newEmptyFieldError("Nameservers")
}
for _, nameserver := range m.Nameservers {
if nameserver == "" {
return newMachineError("Nameservers contains an empty entry")
}
}
if m.Netmask == "" {
return newEmptyFieldError("Netmask")
}
if m.MACAddress == "" {
return newEmptyFieldError("MACAddress")
}
if _, err := net.ParseMAC(m.MACAddress); err != nil {
return fmt.Errorf("MACAddress: %v", err)
}
if m.Hostname == "" {
return newEmptyFieldError("Hostname")
}
if errs := apimachineryvalidation.IsDNS1123Subdomain(m.Hostname); len(errs) > 0 {
return fmt.Errorf("invalid hostname: %v: %v", m.Hostname, errs)
}
if !linuxPathValidation.MatchString(m.Disk) {
return fmt.Errorf(
"disk must be a valid linux path (\"%v\")",
linuxPathRegex,
)
}
for key, value := range m.Labels {
if err := validateLabelKey(key); err != nil {
return err
}
if err := validateLabelValue(value); err != nil {
return err
}
}
if m.HasBMC() {
if m.BMCIPAddress == "" {
return newEmptyFieldError("BMCIPAddress")
}
if err := networkutils.ValidateIP(m.BMCIPAddress); err != nil {
return fmt.Errorf("BMCIPAddress: %v", err)
}
if m.BMCUsername == "" {
return newEmptyFieldError("BMCUsername")
}
if m.BMCPassword == "" {
return newEmptyFieldError("BMCPassword")
}
}
if m.VLANID != "" {
i, err := strconv.Atoi(m.VLANID)
if err != nil {
return errors.New("VLANID: must be a string integer")
}
// valid VLAN IDs are between 1 and 4094 - https://en.m.wikipedia.org/wiki/VLAN#IEEE_802.1Q
const (
maxVLANID = 4094
minVLANID = 1
)
if i < minVLANID || i > maxVLANID {
return errors.New("VLANID: must be between 1 and 4094")
}
}
return nil
}
}
// UniqueIPAddress asserts a given Machine instance has a unique IPAddress field relative to previously seen Machine
// instances. It is not thread safe and is intended for single use.
func UniqueIPAddress() MachineAssertion {
ips := make(map[string]struct{})
return func(m Machine) error {
if _, seen := ips[m.IPAddress]; seen {
return fmt.Errorf("duplicate IPAddress: %v", m.IPAddress)
}
ips[m.IPAddress] = struct{}{}
return nil
}
}
// UniqueMACAddress asserts a given Machine instance has a unique MACAddress field relative to previously seen Machine
// instances. It is not thread safe and is intended for single use.
func UniqueMACAddress() MachineAssertion {
macs := make(map[string]struct{})
return func(m Machine) error {
if _, seen := macs[m.MACAddress]; seen {
return fmt.Errorf("duplicate MACAddress: %v", m.MACAddress)
}
macs[m.MACAddress] = struct{}{}
return nil
}
}
// UniqueHostnames asserts a given Machine instance has a unique Hostname field relative to previously seen Machine
// instances. It is not thread safe and is intended for single use.
func UniqueHostnames() MachineAssertion {
hostnames := make(map[string]struct{})
return func(m Machine) error {
if _, seen := hostnames[m.Hostname]; seen {
return fmt.Errorf("duplicate Hostname: %v", m.Hostname)
}
hostnames[m.Hostname] = struct{}{}
return nil
}
}
// UniqueBMCIPAddress asserts a given Machine instance has a unique BMCIPAddress field relative to previously seen
// Machine instances. If there is no BMC configuration as defined by machine.HasBMC() the check is a noop. It is
// not thread safe and is intended for single use.
func UniqueBMCIPAddress() MachineAssertion {
ips := make(map[string]struct{})
return func(m Machine) error {
if !m.HasBMC() {
return nil
}
if m.BMCIPAddress == "" {
return fmt.Errorf("missing BMCIPAddress (mac=\"%v\")", m.MACAddress)
}
if _, seen := ips[m.BMCIPAddress]; seen {
return fmt.Errorf("duplicate IPAddress: %v", m.BMCIPAddress)
}
ips[m.BMCIPAddress] = struct{}{}
return nil
}
}
// RegisterDefaultAssertions applies a set of default assertions to validator. The defaults include
// StaticMachineAssertions, UniqueIPAddress, UniqueMACAddress, UniqueHostnames and UniqueBMCIPAddress.
func RegisterDefaultAssertions(validator *DefaultMachineValidator) {
validator.Register([]MachineAssertion{
StaticMachineAssertions(),
UniqueIPAddress(),
UniqueMACAddress(),
UniqueHostnames(),
UniqueBMCIPAddress(),
}...)
}
func validateLabelKey(k string) error {
if errs := apimachineryvalidation.IsQualifiedName(k); len(errs) != 0 {
return fmt.Errorf("%v", strings.Join(errs, "; "))
}
return nil
}
func validateLabelValue(v string) error {
if errs := apimachineryvalidation.IsValidLabelValue(v); len(errs) != 0 {
return fmt.Errorf("%v", strings.Join(errs, "; "))
}
return nil
}
// LabelsMatchSelector ensures all selector key-value pairs can be found in labels.
// If selector is empty, true is always returned.
func LabelsMatchSelector(selector v1alpha1.HardwareSelector, labels Labels) bool {
for expectKey, expectValue := range selector {
labelValue, hasLabel := labels[expectKey]
if !hasLabel || labelValue != expectValue {
return false
}
}
return true
}
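// labelsMatchSelectorExample is an illustrative sketch (not part of the original source) showing
// how a machine config hardware selector is matched against hardware labels. The selector and
// label values here are hypothetical.
func labelsMatchSelectorExample() bool {
selector := v1alpha1.HardwareSelector{"type": "cp"}
labels := Labels{"type": "cp", "rack": "r1"}
// Every selector pair must be present in labels; extra labels on the hardware are ignored.
return LabelsMatchSelector(selector, labels)
}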
| 265 |
eks-anywhere | aws | Go | package hardware_test
import (
"errors"
"testing"
"github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
func TestDefaultMachineValidatorValidationsRun(t *testing.T) {
g := gomega.NewWithT(t)
// check is set by assertion when it's called and allows us to validate that
// registered assertions are in fact called by the validation decorator.
var check bool
assertion := func(m hardware.Machine) error {
check = true
return nil
}
validator := &hardware.DefaultMachineValidator{}
validator.Register(assertion)
err := validator.Validate(NewValidMachine())
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(check).To(gomega.BeTrue())
}
func TestDefaultMachineValidatorErrorsWhenAssertionErrors(t *testing.T) {
g := gomega.NewWithT(t)
// expect is returned by the registered assertion so we can validate that
// assertion errors are propagated by the validation decorator.
expect := errors.New("something went wrong")
assertion := func(hardware.Machine) error {
return expect
}
validator := &hardware.DefaultMachineValidator{}
validator.Register(assertion)
err := validator.Validate(NewValidMachine())
g.Expect(err).To(gomega.BeEquivalentTo(expect))
}
func TestUniquenessAssertions(t *testing.T) {
cases := map[string]struct {
Assertion hardware.MachineAssertion
Machines []hardware.Machine
}{
"IPAddresses": {
Assertion: hardware.UniqueIPAddress(),
Machines: []hardware.Machine{
{IPAddress: "foo"},
{IPAddress: "bar"},
},
},
"MACAddresses": {
Assertion: hardware.UniqueMACAddress(),
Machines: []hardware.Machine{
{MACAddress: "foo"},
{MACAddress: "bar"},
},
},
"Hostnames": {
Assertion: hardware.UniqueHostnames(),
Machines: []hardware.Machine{
{Hostname: "foo"},
{Hostname: "bar"},
},
},
"BMCIPAddresses": {
Assertion: hardware.UniqueBMCIPAddress(),
Machines: []hardware.Machine{
{BMCIPAddress: "foo"},
{BMCIPAddress: "bar"},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
g := gomega.NewWithT(t)
g.Expect(tc.Assertion(tc.Machines[0])).ToNot(gomega.HaveOccurred())
g.Expect(tc.Assertion(tc.Machines[1])).ToNot(gomega.HaveOccurred())
})
}
}
func TestUniquenessAssertionsWithDupes(t *testing.T) {
cases := map[string]struct {
Assertion hardware.MachineAssertion
Machines []hardware.Machine
}{
"IPAddresses": {
Assertion: hardware.UniqueIPAddress(),
Machines: []hardware.Machine{
{IPAddress: "foo"},
{IPAddress: "foo"},
},
},
"MACAddresses": {
Assertion: hardware.UniqueMACAddress(),
Machines: []hardware.Machine{
{MACAddress: "foo"},
{MACAddress: "foo"},
},
},
"Hostnames": {
Assertion: hardware.UniqueHostnames(),
Machines: []hardware.Machine{
{Hostname: "foo"},
{Hostname: "foo"},
},
},
"BMCIPAddresses": {
Assertion: hardware.UniqueBMCIPAddress(),
Machines: []hardware.Machine{
{BMCIPAddress: "foo"},
{BMCIPAddress: "foo"},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
g := gomega.NewWithT(t)
g.Expect(tc.Assertion(tc.Machines[0])).ToNot(gomega.HaveOccurred())
g.Expect(tc.Assertion(tc.Machines[1])).To(gomega.HaveOccurred())
})
}
}
func TestStaticMachineAssertions_ValidMachine(t *testing.T) {
g := gomega.NewWithT(t)
machine := NewValidMachine()
validate := hardware.StaticMachineAssertions()
g.Expect(validate(machine)).ToNot(gomega.HaveOccurred())
}
func TestStaticMachineAssertions_InvalidMachines(t *testing.T) {
g := gomega.NewWithT(t)
cases := map[string]func(*hardware.Machine){
"EmptyIPAddress": func(h *hardware.Machine) {
h.IPAddress = ""
},
"InvalidIPAddress": func(h *hardware.Machine) {
h.IPAddress = "invalid"
},
"EmptyGateway": func(h *hardware.Machine) {
h.Gateway = ""
},
"InvalidGateway": func(h *hardware.Machine) {
h.Gateway = "invalid"
},
"NoNameservers": func(h *hardware.Machine) {
h.Nameservers = []string{}
},
"EmptyNameserver": func(h *hardware.Machine) {
h.Nameservers = []string{""}
},
"EmptyNetmask": func(h *hardware.Machine) {
h.Netmask = ""
},
"EmptyMACAddress": func(h *hardware.Machine) {
h.MACAddress = ""
},
"InvalidMACAddress": func(h *hardware.Machine) {
h.MACAddress = "invalid mac"
},
"EmptyHostname": func(h *hardware.Machine) {
h.Hostname = ""
},
"InvalidHostname": func(h *hardware.Machine) {
h.Hostname = "!@#$%"
},
"EmptyBMCIPAddress": func(h *hardware.Machine) {
h.BMCIPAddress = ""
},
"InvalidBMCIPAddress": func(h *hardware.Machine) {
h.BMCIPAddress = "invalid"
},
"EmptyBMCUsername": func(h *hardware.Machine) {
h.BMCUsername = ""
},
"EmptyBMCPassword": func(h *hardware.Machine) {
h.BMCPassword = ""
},
"InvalidLabelKey": func(h *hardware.Machine) {
h.Labels["?$?$?"] = "foo"
},
"InvalidLabelValue": func(h *hardware.Machine) {
h.Labels["foo"] = "\\/dsa"
},
"InvalidDisk": func(h *hardware.Machine) {
h.Disk = "*&!@#!%"
},
"InvalidWithJustDev": func(h *hardware.Machine) {
h.Disk = "/dev/"
},
"InvalidVLANUnder": func(h *hardware.Machine) {
h.VLANID = "0"
},
"InvalidVLANOver": func(h *hardware.Machine) {
h.VLANID = "4095"
},
"NonIntVLAN": func(h *hardware.Machine) {
h.VLANID = "im not an int"
},
}
validate := hardware.StaticMachineAssertions()
for name, mutate := range cases {
t.Run(name, func(t *testing.T) {
machine := NewValidMachine()
mutate(&machine)
g.Expect(validate(machine)).To(gomega.HaveOccurred())
})
}
}
func NewValidMachine() hardware.Machine {
return hardware.Machine{
IPAddress: "10.10.10.10",
Gateway: "10.10.10.1",
Nameservers: []string{"ns1"},
MACAddress: "00:00:00:00:00:00",
Netmask: "255.255.255.255",
Hostname: "localhost",
Labels: hardware.Labels{"type": "cp"},
Disk: "/dev/sda",
BMCIPAddress: "10.10.10.11",
BMCUsername: "username",
BMCPassword: "password",
VLANID: "200",
}
}
| 245 |
eks-anywhere | aws | Go | package hardware
import (
"fmt"
"io"
"os"
"path/filepath"
"sigs.k8s.io/yaml"
)
// TinkerbellManifestYAML is a MachineWriter that writes Tinkerbell manifests to a destination.
type TinkerbellManifestYAML struct {
writer io.Writer
}
// NewTinkerbellManifestYAML creates a TinkerbellManifestYAML instance that writes its manifests to w.
func NewTinkerbellManifestYAML(w io.Writer) *TinkerbellManifestYAML {
return &TinkerbellManifestYAML{writer: w}
}
// Write m as a set of Kubernetes manifests for use with the Cluster API Provider Tinkerbell. This includes writing
// a Hardware, a BMC Machine and a Secret (for the BMC credentials).
func (yw *TinkerbellManifestYAML) Write(m Machine) error {
hardware, err := marshalTinkerbellHardwareYAML(m)
if err != nil {
return fmt.Errorf("marshalling tinkerbell hardware yaml (mac=%v): %v", m.MACAddress, err)
}
if err := yw.writeWithPrependedSeparator(hardware); err != nil {
return fmt.Errorf("writing tinkerbell hardware yaml (mac=%v): %v", m.MACAddress, err)
}
bmc, err := marshalTinkerbellBMCYAML(m)
if err != nil {
return fmt.Errorf("marshalling tinkerbell bmc yaml (mac=%v): %v", m.MACAddress, err)
}
if err := yw.writeWithPrependedSeparator(bmc); err != nil {
return fmt.Errorf("writing tinkerbell bmc yaml (mac=%v): %v", m.MACAddress, err)
}
secret, err := marshalSecretYAML(m)
if err != nil {
return fmt.Errorf("marshalling bmc secret yaml (mac=%v): %v", m.MACAddress, err)
}
if err := yw.writeWithPrependedSeparator(secret); err != nil {
return fmt.Errorf("writing bmc secret yaml (mac=%v): %v", m.MACAddress, err)
}
return nil
}
var yamlSeparatorWithNewline = []byte("---\n")
func (yw *TinkerbellManifestYAML) writeWithPrependedSeparator(data []byte) error {
if err := yw.write(append(data, yamlSeparatorWithNewline...)); err != nil {
return err
}
return nil
}
func (yw *TinkerbellManifestYAML) write(data []byte) error {
if _, err := yw.writer.Write(data); err != nil {
return err
}
return nil
}
// TODO(chrisdoherty4) Patch these types so we can generate yamls again with the new Hardware
// and BaseboardManagement types.
func marshalTinkerbellHardwareYAML(m Machine) ([]byte, error) {
return yaml.Marshal(hardwareFromMachine(m))
}
func marshalTinkerbellBMCYAML(m Machine) ([]byte, error) {
return yaml.Marshal(toRufioMachine(m))
}
func marshalSecretYAML(m Machine) ([]byte, error) {
return yaml.Marshal(baseboardManagementSecretFromMachine(m))
}
// CreateOrStdout creates path and returns an *os.File if path is not empty. If path is empty,
// os.Stdout is returned.
func CreateOrStdout(path string) (*os.File, error) {
if path != "" {
dir := filepath.Dir(path)
err := os.MkdirAll(dir, 0o755)
if err != nil {
return nil, fmt.Errorf("failed to create hardware yaml file: %v", err)
}
return os.Create(path)
}
return os.Stdout, nil
}
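// A minimal usage sketch (illustrative only; the output path and machines slice are assumptions):
//
//	out, err := CreateOrStdout("hardware-manifests/hardware.yaml")
//	if err != nil {
//		return err
//	}
//	writer := NewTinkerbellManifestYAML(out)
//	for _, machine := range machines {
//		if err := writer.Write(machine); err != nil {
//			return err
//		}
//	}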
| 97 |
eks-anywhere | aws | Go | package hardware_test
import (
"bufio"
"bytes"
"errors"
"testing"
"github.com/onsi/gomega"
rufiov1alpha1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
corev1 "k8s.io/api/core/v1"
apimachineryyaml "k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
func TestTinkerbellManifestYAMLWrites(t *testing.T) {
g := gomega.NewWithT(t)
var buf bytes.Buffer
writer := hardware.NewTinkerbellManifestYAML(&buf)
expect := NewValidMachine()
err := writer.Write(expect)
g.Expect(err).ToNot(gomega.HaveOccurred())
reader := apimachineryyaml.NewYAMLReader(bufio.NewReader(&buf))
var hardware tinkv1alpha1.Hardware
raw, err := reader.Read()
g.Expect(err).ToNot(gomega.HaveOccurred())
err = yaml.Unmarshal(raw, &hardware)
g.Expect(err).ToNot(gomega.HaveOccurred())
var bmc rufiov1alpha1.Machine
raw, err = reader.Read()
g.Expect(err).ToNot(gomega.HaveOccurred())
err = yaml.Unmarshal(raw, &bmc)
g.Expect(err).ToNot(gomega.HaveOccurred())
var secret corev1.Secret
raw, err = reader.Read()
g.Expect(err).ToNot(gomega.HaveOccurred())
err = yaml.Unmarshal(raw, &secret)
g.Expect(err).ToNot(gomega.HaveOccurred())
AssertTinkerbellHardwareRepresentsMachine(g, hardware, expect)
AssertTinkerbellBMCRepresentsMachine(g, bmc, expect)
AssertBMCSecretRepresentsMachine(g, secret, expect)
}
func TestTinkerbellManifestYAMLWriteErrors(t *testing.T) {
g := gomega.NewWithT(t)
writer := hardware.NewTinkerbellManifestYAML(ErrWriter{})
expect := NewValidMachine()
err := writer.Write(expect)
g.Expect(err).To(gomega.HaveOccurred())
}
func AssertTinkerbellHardwareRepresentsMachine(g *gomega.WithT, h tinkv1alpha1.Hardware, m hardware.Machine) {
g.Expect(h.ObjectMeta.Name).To(gomega.Equal(m.Hostname))
}
func AssertTinkerbellBMCRepresentsMachine(g *gomega.WithT, b rufiov1alpha1.Machine, m hardware.Machine) {
g.Expect(b.Spec.Connection.Host).To(gomega.Equal(m.BMCIPAddress))
}
func AssertBMCSecretRepresentsMachine(g *gomega.WithT, s corev1.Secret, m hardware.Machine) {
g.Expect(s.Data).To(gomega.HaveKeyWithValue("username", []byte(m.BMCUsername)))
g.Expect(s.Data).To(gomega.HaveKeyWithValue("password", []byte(m.BMCPassword)))
}
type ErrWriter struct{}
func (ErrWriter) Write([]byte) (int, error) {
return 0, errors.New("ErrWriter: always return an error")
}
| 84 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/providers/tinkerbell/hardware/translate.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
hardware "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
gomock "github.com/golang/mock/gomock"
)
// MockMachineReader is a mock of MachineReader interface.
type MockMachineReader struct {
ctrl *gomock.Controller
recorder *MockMachineReaderMockRecorder
}
// MockMachineReaderMockRecorder is the mock recorder for MockMachineReader.
type MockMachineReaderMockRecorder struct {
mock *MockMachineReader
}
// NewMockMachineReader creates a new mock instance.
func NewMockMachineReader(ctrl *gomock.Controller) *MockMachineReader {
mock := &MockMachineReader{ctrl: ctrl}
mock.recorder = &MockMachineReaderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockMachineReader) EXPECT() *MockMachineReaderMockRecorder {
return m.recorder
}
// Read mocks base method.
func (m *MockMachineReader) Read() (hardware.Machine, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Read")
ret0, _ := ret[0].(hardware.Machine)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Read indicates an expected call of Read.
func (mr *MockMachineReaderMockRecorder) Read() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockMachineReader)(nil).Read))
}
// MockMachineWriter is a mock of MachineWriter interface.
type MockMachineWriter struct {
ctrl *gomock.Controller
recorder *MockMachineWriterMockRecorder
}
// MockMachineWriterMockRecorder is the mock recorder for MockMachineWriter.
type MockMachineWriterMockRecorder struct {
mock *MockMachineWriter
}
// NewMockMachineWriter creates a new mock instance.
func NewMockMachineWriter(ctrl *gomock.Controller) *MockMachineWriter {
mock := &MockMachineWriter{ctrl: ctrl}
mock.recorder = &MockMachineWriterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockMachineWriter) EXPECT() *MockMachineWriterMockRecorder {
return m.recorder
}
// Write mocks base method.
func (m *MockMachineWriter) Write(arg0 hardware.Machine) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Write", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Write indicates an expected call of Write.
func (mr *MockMachineWriterMockRecorder) Write(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockMachineWriter)(nil).Write), arg0)
}
// MockMachineValidator is a mock of MachineValidator interface.
type MockMachineValidator struct {
ctrl *gomock.Controller
recorder *MockMachineValidatorMockRecorder
}
// MockMachineValidatorMockRecorder is the mock recorder for MockMachineValidator.
type MockMachineValidatorMockRecorder struct {
mock *MockMachineValidator
}
// NewMockMachineValidator creates a new mock instance.
func NewMockMachineValidator(ctrl *gomock.Controller) *MockMachineValidator {
mock := &MockMachineValidator{ctrl: ctrl}
mock.recorder = &MockMachineValidatorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockMachineValidator) EXPECT() *MockMachineValidatorMockRecorder {
return m.recorder
}
// Validate mocks base method.
func (m *MockMachineValidator) Validate(arg0 hardware.Machine) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Validate", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Validate indicates an expected call of Validate.
func (mr *MockMachineValidatorMockRecorder) Validate(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockMachineValidator)(nil).Validate), arg0)
}
| 125 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/providers/tinkerbell (interfaces: ProviderKubectlClient,SSHAuthKeyGenerator)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
executables "github.com/aws/eks-anywhere/pkg/executables"
filewriter "github.com/aws/eks-anywhere/pkg/filewriter"
rufiounreleased "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/rufiounreleased"
types "github.com/aws/eks-anywhere/pkg/types"
v1beta1 "github.com/aws/etcdadm-controller/api/v1beta1"
gomock "github.com/golang/mock/gomock"
v1alpha10 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
v1 "k8s.io/api/core/v1"
v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1"
v1beta11 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
)
// MockProviderKubectlClient is a mock of ProviderKubectlClient interface.
type MockProviderKubectlClient struct {
ctrl *gomock.Controller
recorder *MockProviderKubectlClientMockRecorder
}
// MockProviderKubectlClientMockRecorder is the mock recorder for MockProviderKubectlClient.
type MockProviderKubectlClientMockRecorder struct {
mock *MockProviderKubectlClient
}
// NewMockProviderKubectlClient creates a new mock instance.
func NewMockProviderKubectlClient(ctrl *gomock.Controller) *MockProviderKubectlClient {
mock := &MockProviderKubectlClient{ctrl: ctrl}
mock.recorder = &MockProviderKubectlClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockProviderKubectlClient) EXPECT() *MockProviderKubectlClientMockRecorder {
return m.recorder
}
// AllBaseboardManagements mocks base method.
func (m *MockProviderKubectlClient) AllBaseboardManagements(arg0 context.Context, arg1 string) ([]rufiounreleased.BaseboardManagement, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AllBaseboardManagements", arg0, arg1)
ret0, _ := ret[0].([]rufiounreleased.BaseboardManagement)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AllBaseboardManagements indicates an expected call of AllBaseboardManagements.
func (mr *MockProviderKubectlClientMockRecorder) AllBaseboardManagements(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllBaseboardManagements", reflect.TypeOf((*MockProviderKubectlClient)(nil).AllBaseboardManagements), arg0, arg1)
}
// AllTinkerbellHardware mocks base method.
func (m *MockProviderKubectlClient) AllTinkerbellHardware(arg0 context.Context, arg1 string) ([]v1alpha10.Hardware, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AllTinkerbellHardware", arg0, arg1)
ret0, _ := ret[0].([]v1alpha10.Hardware)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AllTinkerbellHardware indicates an expected call of AllTinkerbellHardware.
func (mr *MockProviderKubectlClientMockRecorder) AllTinkerbellHardware(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllTinkerbellHardware", reflect.TypeOf((*MockProviderKubectlClient)(nil).AllTinkerbellHardware), arg0, arg1)
}
// ApplyKubeSpecFromBytesForce mocks base method.
func (m *MockProviderKubectlClient) ApplyKubeSpecFromBytesForce(arg0 context.Context, arg1 *types.Cluster, arg2 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytesForce", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytesForce indicates an expected call of ApplyKubeSpecFromBytesForce.
func (mr *MockProviderKubectlClientMockRecorder) ApplyKubeSpecFromBytesForce(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytesForce", reflect.TypeOf((*MockProviderKubectlClient)(nil).ApplyKubeSpecFromBytesForce), arg0, arg1, arg2)
}
// ApplyKubeSpecFromBytesWithNamespace mocks base method.
func (m *MockProviderKubectlClient) ApplyKubeSpecFromBytesWithNamespace(arg0 context.Context, arg1 *types.Cluster, arg2 []byte, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytesWithNamespace", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytesWithNamespace indicates an expected call of ApplyKubeSpecFromBytesWithNamespace.
func (mr *MockProviderKubectlClientMockRecorder) ApplyKubeSpecFromBytesWithNamespace(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytesWithNamespace", reflect.TypeOf((*MockProviderKubectlClient)(nil).ApplyKubeSpecFromBytesWithNamespace), arg0, arg1, arg2, arg3)
}
// DeleteCRD mocks base method.
func (m *MockProviderKubectlClient) DeleteCRD(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteCRD", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteCRD indicates an expected call of DeleteCRD.
func (mr *MockProviderKubectlClientMockRecorder) DeleteCRD(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCRD", reflect.TypeOf((*MockProviderKubectlClient)(nil).DeleteCRD), arg0, arg1, arg2)
}
// DeleteEksaDatacenterConfig mocks base method.
func (m *MockProviderKubectlClient) DeleteEksaDatacenterConfig(arg0 context.Context, arg1, arg2, arg3, arg4 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteEksaDatacenterConfig", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteEksaDatacenterConfig indicates an expected call of DeleteEksaDatacenterConfig.
func (mr *MockProviderKubectlClientMockRecorder) DeleteEksaDatacenterConfig(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEksaDatacenterConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).DeleteEksaDatacenterConfig), arg0, arg1, arg2, arg3, arg4)
}
// DeleteEksaMachineConfig mocks base method.
func (m *MockProviderKubectlClient) DeleteEksaMachineConfig(arg0 context.Context, arg1, arg2, arg3, arg4 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteEksaMachineConfig", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteEksaMachineConfig indicates an expected call of DeleteEksaMachineConfig.
func (mr *MockProviderKubectlClientMockRecorder) DeleteEksaMachineConfig(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEksaMachineConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).DeleteEksaMachineConfig), arg0, arg1, arg2, arg3, arg4)
}
// GetEksaCluster mocks base method.
func (m *MockProviderKubectlClient) GetEksaCluster(arg0 context.Context, arg1 *types.Cluster, arg2 string) (*v1alpha1.Cluster, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaCluster", arg0, arg1, arg2)
ret0, _ := ret[0].(*v1alpha1.Cluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaCluster indicates an expected call of GetEksaCluster.
func (mr *MockProviderKubectlClientMockRecorder) GetEksaCluster(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCluster", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEksaCluster), arg0, arg1, arg2)
}
// GetEksaTinkerbellDatacenterConfig mocks base method.
func (m *MockProviderKubectlClient) GetEksaTinkerbellDatacenterConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.TinkerbellDatacenterConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaTinkerbellDatacenterConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha1.TinkerbellDatacenterConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaTinkerbellDatacenterConfig indicates an expected call of GetEksaTinkerbellDatacenterConfig.
func (mr *MockProviderKubectlClientMockRecorder) GetEksaTinkerbellDatacenterConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaTinkerbellDatacenterConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEksaTinkerbellDatacenterConfig), arg0, arg1, arg2, arg3)
}
// GetEksaTinkerbellMachineConfig mocks base method.
func (m *MockProviderKubectlClient) GetEksaTinkerbellMachineConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.TinkerbellMachineConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaTinkerbellMachineConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha1.TinkerbellMachineConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaTinkerbellMachineConfig indicates an expected call of GetEksaTinkerbellMachineConfig.
func (mr *MockProviderKubectlClientMockRecorder) GetEksaTinkerbellMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaTinkerbellMachineConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEksaTinkerbellMachineConfig), arg0, arg1, arg2, arg3)
}
// GetEtcdadmCluster mocks base method.
func (m *MockProviderKubectlClient) GetEtcdadmCluster(arg0 context.Context, arg1 *types.Cluster, arg2 string, arg3 ...executables.KubectlOpt) (*v1beta1.EtcdadmCluster, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetEtcdadmCluster", varargs...)
ret0, _ := ret[0].(*v1beta1.EtcdadmCluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEtcdadmCluster indicates an expected call of GetEtcdadmCluster.
func (mr *MockProviderKubectlClientMockRecorder) GetEtcdadmCluster(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEtcdadmCluster", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEtcdadmCluster), varargs...)
}
// GetKubeadmControlPlane mocks base method.
func (m *MockProviderKubectlClient) GetKubeadmControlPlane(arg0 context.Context, arg1 *types.Cluster, arg2 string, arg3 ...executables.KubectlOpt) (*v1beta11.KubeadmControlPlane, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetKubeadmControlPlane", varargs...)
ret0, _ := ret[0].(*v1beta11.KubeadmControlPlane)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetKubeadmControlPlane indicates an expected call of GetKubeadmControlPlane.
func (mr *MockProviderKubectlClientMockRecorder) GetKubeadmControlPlane(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetKubeadmControlPlane", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetKubeadmControlPlane), varargs...)
}
// GetMachineDeployment mocks base method.
func (m *MockProviderKubectlClient) GetMachineDeployment(arg0 context.Context, arg1 string, arg2 ...executables.KubectlOpt) (*v1beta10.MachineDeployment, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetMachineDeployment", varargs...)
ret0, _ := ret[0].(*v1beta10.MachineDeployment)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetMachineDeployment indicates an expected call of GetMachineDeployment.
func (mr *MockProviderKubectlClientMockRecorder) GetMachineDeployment(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMachineDeployment", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetMachineDeployment), varargs...)
}
// GetProvisionedTinkerbellHardware mocks base method.
func (m *MockProviderKubectlClient) GetProvisionedTinkerbellHardware(arg0 context.Context, arg1, arg2 string) ([]v1alpha10.Hardware, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetProvisionedTinkerbellHardware", arg0, arg1, arg2)
ret0, _ := ret[0].([]v1alpha10.Hardware)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetProvisionedTinkerbellHardware indicates an expected call of GetProvisionedTinkerbellHardware.
func (mr *MockProviderKubectlClientMockRecorder) GetProvisionedTinkerbellHardware(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProvisionedTinkerbellHardware", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetProvisionedTinkerbellHardware), arg0, arg1, arg2)
}
// GetSecret mocks base method.
func (m *MockProviderKubectlClient) GetSecret(arg0 context.Context, arg1 string, arg2 ...executables.KubectlOpt) (*v1.Secret, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetSecret", varargs...)
ret0, _ := ret[0].(*v1.Secret)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSecret indicates an expected call of GetSecret.
func (mr *MockProviderKubectlClientMockRecorder) GetSecret(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecret", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetSecret), varargs...)
}
// GetUnprovisionedTinkerbellHardware mocks base method.
func (m *MockProviderKubectlClient) GetUnprovisionedTinkerbellHardware(arg0 context.Context, arg1, arg2 string) ([]v1alpha10.Hardware, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetUnprovisionedTinkerbellHardware", arg0, arg1, arg2)
ret0, _ := ret[0].([]v1alpha10.Hardware)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetUnprovisionedTinkerbellHardware indicates an expected call of GetUnprovisionedTinkerbellHardware.
func (mr *MockProviderKubectlClientMockRecorder) GetUnprovisionedTinkerbellHardware(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnprovisionedTinkerbellHardware", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetUnprovisionedTinkerbellHardware), arg0, arg1, arg2)
}
// HasCRD mocks base method.
func (m *MockProviderKubectlClient) HasCRD(arg0 context.Context, arg1, arg2 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "HasCRD", arg0, arg1, arg2)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// HasCRD indicates an expected call of HasCRD.
func (mr *MockProviderKubectlClientMockRecorder) HasCRD(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasCRD", reflect.TypeOf((*MockProviderKubectlClient)(nil).HasCRD), arg0, arg1, arg2)
}
// SearchTinkerbellDatacenterConfig mocks base method.
func (m *MockProviderKubectlClient) SearchTinkerbellDatacenterConfig(arg0 context.Context, arg1, arg2, arg3 string) ([]*v1alpha1.TinkerbellDatacenterConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SearchTinkerbellDatacenterConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].([]*v1alpha1.TinkerbellDatacenterConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SearchTinkerbellDatacenterConfig indicates an expected call of SearchTinkerbellDatacenterConfig.
func (mr *MockProviderKubectlClientMockRecorder) SearchTinkerbellDatacenterConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchTinkerbellDatacenterConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).SearchTinkerbellDatacenterConfig), arg0, arg1, arg2, arg3)
}
// SearchTinkerbellMachineConfig mocks base method.
func (m *MockProviderKubectlClient) SearchTinkerbellMachineConfig(arg0 context.Context, arg1, arg2, arg3 string) ([]*v1alpha1.TinkerbellMachineConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SearchTinkerbellMachineConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].([]*v1alpha1.TinkerbellMachineConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SearchTinkerbellMachineConfig indicates an expected call of SearchTinkerbellMachineConfig.
func (mr *MockProviderKubectlClientMockRecorder) SearchTinkerbellMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchTinkerbellMachineConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).SearchTinkerbellMachineConfig), arg0, arg1, arg2, arg3)
}
// UpdateAnnotation mocks base method.
func (m *MockProviderKubectlClient) UpdateAnnotation(arg0 context.Context, arg1, arg2 string, arg3 map[string]string, arg4 ...executables.KubectlOpt) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2, arg3}
for _, a := range arg4 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpdateAnnotation", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// UpdateAnnotation indicates an expected call of UpdateAnnotation.
func (mr *MockProviderKubectlClientMockRecorder) UpdateAnnotation(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAnnotation", reflect.TypeOf((*MockProviderKubectlClient)(nil).UpdateAnnotation), varargs...)
}
// WaitForDeployment mocks base method.
func (m *MockProviderKubectlClient) WaitForDeployment(arg0 context.Context, arg1 *types.Cluster, arg2, arg3, arg4, arg5 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForDeployment", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForDeployment indicates an expected call of WaitForDeployment.
func (mr *MockProviderKubectlClientMockRecorder) WaitForDeployment(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForDeployment", reflect.TypeOf((*MockProviderKubectlClient)(nil).WaitForDeployment), arg0, arg1, arg2, arg3, arg4, arg5)
}
// WaitForRufioMachines mocks base method.
func (m *MockProviderKubectlClient) WaitForRufioMachines(arg0 context.Context, arg1 *types.Cluster, arg2, arg3, arg4 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForRufioMachines", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForRufioMachines indicates an expected call of WaitForRufioMachines.
func (mr *MockProviderKubectlClientMockRecorder) WaitForRufioMachines(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForRufioMachines", reflect.TypeOf((*MockProviderKubectlClient)(nil).WaitForRufioMachines), arg0, arg1, arg2, arg3, arg4)
}
// MockSSHAuthKeyGenerator is a mock of SSHAuthKeyGenerator interface.
type MockSSHAuthKeyGenerator struct {
ctrl *gomock.Controller
recorder *MockSSHAuthKeyGeneratorMockRecorder
}
// MockSSHAuthKeyGeneratorMockRecorder is the mock recorder for MockSSHAuthKeyGenerator.
type MockSSHAuthKeyGeneratorMockRecorder struct {
mock *MockSSHAuthKeyGenerator
}
// NewMockSSHAuthKeyGenerator creates a new mock instance.
func NewMockSSHAuthKeyGenerator(ctrl *gomock.Controller) *MockSSHAuthKeyGenerator {
mock := &MockSSHAuthKeyGenerator{ctrl: ctrl}
mock.recorder = &MockSSHAuthKeyGeneratorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSSHAuthKeyGenerator) EXPECT() *MockSSHAuthKeyGeneratorMockRecorder {
return m.recorder
}
// GenerateSSHAuthKey mocks base method.
func (m *MockSSHAuthKeyGenerator) GenerateSSHAuthKey(arg0 filewriter.FileWriter) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GenerateSSHAuthKey", arg0)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GenerateSSHAuthKey indicates an expected call of GenerateSSHAuthKey.
func (mr *MockSSHAuthKeyGeneratorMockRecorder) GenerateSSHAuthKey(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateSSHAuthKey", reflect.TypeOf((*MockSSHAuthKeyGenerator)(nil).GenerateSSHAuthKey), arg0)
}
| 431 |
eks-anywhere | aws | Go | package reconciler_test
import (
"os"
"testing"
"github.com/aws/eks-anywhere/internal/test/envtest"
)
var env *envtest.Environment
func TestMain(m *testing.M) {
os.Exit(envtest.RunWithEnvironment(m, envtest.WithAssignment(&env)))
}
| 15 |
eks-anywhere | aws | Go | package reconciler
import (
"context"
"fmt"
"reflect"
"github.com/go-logr/logr"
"github.com/pkg/errors"
tinkerbellv1 "github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1"
rufiov1alpha1 "github.com/tinkerbell/rufio/api/v1alpha1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
c "github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)
const (
// NewClusterOperation indicates to create a new cluster.
NewClusterOperation Operation = "NewCluster"
// K8sVersionUpgradeOperation indicates to upgrade all nodes to a new Kubernetes version.
K8sVersionUpgradeOperation Operation = "K8sVersionUpgrade"
// NoChange indicates no change was made to the cluster during a periodic sync.
NoChange Operation = "NoChange"
)
// Operation indicates the desired change on a cluster.
type Operation string
// CNIReconciler is an interface for reconciling CNI in the Tinkerbell cluster reconciler.
type CNIReconciler interface {
Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *c.Spec) (controller.Result, error)
}
// RemoteClientRegistry is an interface that defines methods for remote clients.
type RemoteClientRegistry interface {
GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error)
}
// IPValidator is an interface that defines methods to validate the control plane IP.
type IPValidator interface {
ValidateControlPlaneIP(ctx context.Context, log logr.Logger, spec *c.Spec) (controller.Result, error)
}
// Scope object for Tinkerbell reconciler.
type Scope struct {
ClusterSpec *c.Spec
ControlPlane *tinkerbell.ControlPlane
Workers *tinkerbell.Workers
}
// NewScope creates a new Tinkerbell Reconciler Scope.
func NewScope(clusterSpec *c.Spec) *Scope {
return &Scope{
ClusterSpec: clusterSpec,
}
}
// Reconciler for Tinkerbell.
type Reconciler struct {
client client.Client
cniReconciler CNIReconciler
remoteClientRegistry RemoteClientRegistry
ipValidator IPValidator
}
// New defines a new Tinkerbell reconciler.
func New(client client.Client, cniReconciler CNIReconciler, remoteClientRegistry RemoteClientRegistry, ipValidator IPValidator) *Reconciler {
return &Reconciler{
client: client,
cniReconciler: cniReconciler,
remoteClientRegistry: remoteClientRegistry,
ipValidator: ipValidator,
}
}
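// Wiring sketch (illustrative only; the concrete cniReconciler, remoteClientRegistry and ipValidator
// implementations are assumptions provided by the controller setup, not defined here):
//
//	r := New(mgrClient, cniReconciler, remoteClientRegistry, ipValidator)
//	result, err := r.Reconcile(ctx, log, eksaCluster)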
// Reconcile reconciles cluster to desired state.
func (r *Reconciler) Reconcile(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
// Run all reconcile phases: validate the machine and datacenter configs,
// then reconcile the control plane and worker nodes.
log = log.WithValues("provider", "tinkerbell")
clusterSpec, err := c.BuildSpec(ctx, clientutil.NewKubeClient(r.client), cluster)
if err != nil {
return controller.Result{}, err
}
return controller.NewPhaseRunner[*Scope]().Register(
r.ValidateControlPlaneIP,
r.ValidateClusterSpec,
r.GenerateSpec,
r.ValidateHardware,
r.ValidateDatacenterConfig,
r.ValidateRufioMachines,
r.CleanupStatusAfterValidate,
r.ReconcileControlPlane,
r.CheckControlPlaneReady,
r.ReconcileCNI,
r.ReconcileWorkers,
).Run(ctx, log, NewScope(clusterSpec))
}
// ValidateControlPlaneIP passes the cluster spec from tinkerbellScope to the IP Validator.
func (r *Reconciler) ValidateControlPlaneIP(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (controller.Result, error) {
return r.ipValidator.ValidateControlPlaneIP(ctx, log, tinkerbellScope.ClusterSpec)
}
// CleanupStatusAfterValidate removes errors from the cluster status once the validation phases have passed.
func (r *Reconciler) CleanupStatusAfterValidate(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (controller.Result, error) {
return clusters.CleanupStatusAfterValidate(ctx, log, tinkerbellScope.ClusterSpec)
}
// ValidateClusterSpec performs a set of assertions on a cluster spec.
func (r *Reconciler) ValidateClusterSpec(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (controller.Result, error) {
clusterSpec := tinkerbellScope.ClusterSpec
log = log.WithValues("phase", "validateClusterSpec")
tinkerbellClusterSpec := tinkerbell.NewClusterSpec(clusterSpec, clusterSpec.Config.TinkerbellMachineConfigs, clusterSpec.Config.TinkerbellDatacenter)
clusterSpecValidator := tinkerbell.NewClusterSpecValidator()
if err := clusterSpecValidator.Validate(tinkerbellClusterSpec); err != nil {
log.Error(err, "Invalid Tinkerbell Cluster spec")
failureMessage := err.Error()
clusterSpec.Cluster.Status.FailureMessage = &failureMessage
return controller.ResultWithReturn(), nil
}
return controller.Result{}, nil
}
// GenerateSpec generates Tinkerbell control plane and workers spec.
func (r *Reconciler) GenerateSpec(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (controller.Result, error) {
spec := tinkerbellScope.ClusterSpec
log = log.WithValues("phase", "GenerateSpec")
cp, err := tinkerbell.ControlPlaneSpec(ctx, log, clientutil.NewKubeClient(r.client), spec)
if err != nil {
return controller.Result{}, errors.Wrap(err, "generating control plane spec")
}
tinkerbellScope.ControlPlane = cp
w, err := tinkerbell.WorkersSpec(ctx, log, clientutil.NewKubeClient(r.client), spec)
if err != nil {
return controller.Result{}, errors.Wrap(err, "generating workers spec")
}
tinkerbellScope.Workers = w
err = r.omitTinkerbellMachineTemplates(ctx, tinkerbellScope)
if err != nil {
return controller.Result{}, err
}
return controller.Result{}, nil
}
// DetectOperation detects change type.
func (r *Reconciler) DetectOperation(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (Operation, error) {
log.Info("Detecting operation type")
currentKCP, err := controller.GetKubeadmControlPlane(ctx, r.client, tinkerbellScope.ClusterSpec.Cluster)
if err != nil {
return "", err
}
if currentKCP == nil {
log.Info("Operation detected", "operation", NewClusterOperation)
return NewClusterOperation, nil
}
// The restriction that disallows simultaneous scaling and rolling upgrades is enforced by the webhook.
if currentKCP.Spec.Version != tinkerbellScope.ControlPlane.KubeadmControlPlane.Spec.Version {
log.Info("Operation detected", "operation", K8sVersionUpgradeOperation)
return K8sVersionUpgradeOperation, nil
}
log.Info("Operation detected", "operation", NoChange)
return NoChange, nil
}
func (r *Reconciler) omitTinkerbellMachineTemplates(ctx context.Context, tinkerbellScope *Scope) error { //nolint:gocyclo
currentKCP, err := controller.GetKubeadmControlPlane(ctx, r.client, tinkerbellScope.ClusterSpec.Cluster)
if err != nil {
return errors.Wrap(err, "failed to get kubeadmcontrolplane")
}
if currentKCP == nil || currentKCP.Spec.Version != tinkerbellScope.ControlPlane.KubeadmControlPlane.Spec.Version {
return nil
}
cpMachineTemplate, err := tinkerbell.GetMachineTemplate(ctx, clientutil.NewKubeClient(r.client), currentKCP.Spec.MachineTemplate.InfrastructureRef.Name, currentKCP.GetNamespace())
if err != nil && !apierrors.IsNotFound(err) {
return errors.Wrap(err, "failed to get controlplane machinetemplate")
}
if cpMachineTemplate != nil {
tinkerbellScope.ControlPlane.ControlPlaneMachineTemplate = nil
tinkerbellScope.ControlPlane.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name = cpMachineTemplate.GetName()
}
for i, wg := range tinkerbellScope.Workers.Groups {
machineDeployment, err := controller.GetMachineDeployment(ctx, r.client, wg.MachineDeployment.GetName())
if err != nil {
return errors.Wrap(err, "failed to get workernode group machinedeployment")
}
if machineDeployment == nil ||
!reflect.DeepEqual(machineDeployment.Spec.Template.Spec.Version, tinkerbellScope.Workers.Groups[i].MachineDeployment.Spec.Template.Spec.Version) {
continue
}
workerMachineTemplate, err := tinkerbell.GetMachineTemplate(ctx, clientutil.NewKubeClient(r.client), machineDeployment.Spec.Template.Spec.InfrastructureRef.Name, machineDeployment.GetNamespace())
if err != nil && !apierrors.IsNotFound(err) {
return errors.Wrap(err, "failed to get workernode group machinetemplate")
}
if workerMachineTemplate != nil {
tinkerbellScope.Workers.Groups[i].ProviderMachineTemplate = nil
tinkerbellScope.Workers.Groups[i].MachineDeployment.Spec.Template.Spec.InfrastructureRef.Name = workerMachineTemplate.GetName()
}
}
return nil
}
// ReconcileControlPlane applies the control plane CAPI objects to the cluster.
func (r *Reconciler) ReconcileControlPlane(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (controller.Result, error) {
log = log.WithValues("phase", "reconcileControlPlane")
log.Info("Applying control plane CAPI objects")
return clusters.ReconcileControlPlane(ctx, r.client, toClientControlPlane(tinkerbellScope.ControlPlane))
}
// CheckControlPlaneReady checks whether the control plane for an eks-a cluster is ready.
// It requeues with the appropriate wait time whenever the control plane is not ready yet.
func (r *Reconciler) CheckControlPlaneReady(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (controller.Result, error) {
clusterSpec := tinkerbellScope.ClusterSpec
log = log.WithValues("phase", "checkControlPlaneReady")
return clusters.CheckControlPlaneReady(ctx, r.client, log, clusterSpec.Cluster)
}
// ReconcileWorkerNodes reconciles the worker nodes to the desired state.
func (r *Reconciler) ReconcileWorkerNodes(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
log = log.WithValues("provider", "tinkerbell", "reconcile type", "workers")
clusterSpec, err := c.BuildSpec(ctx, clientutil.NewKubeClient(r.client), cluster)
if err != nil {
return controller.Result{}, errors.Wrap(err, "building cluster Spec for worker node reconcile")
}
return controller.NewPhaseRunner[*Scope]().Register(
r.ValidateClusterSpec,
r.GenerateSpec,
r.ValidateHardware,
r.ValidateRufioMachines,
r.ReconcileWorkers,
).Run(ctx, log, NewScope(clusterSpec))
}
// ReconcileWorkers applies the worker CAPI objects to the cluster.
func (r *Reconciler) ReconcileWorkers(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (controller.Result, error) {
spec := tinkerbellScope.ClusterSpec
log = log.WithValues("phase", "reconcileWorkers")
log.Info("Applying worker CAPI objects")
return clusters.ReconcileWorkersForEKSA(ctx, log, r.client, spec.Cluster, clusters.ToWorkers(tinkerbellScope.Workers))
}
// ValidateDatacenterConfig updates the cluster status if the TinkerbellDatacenterConfig is invalid, for example when a workload cluster's Tinkerbell IP does not match the management cluster's.
func (r *Reconciler) ValidateDatacenterConfig(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (controller.Result, error) {
log = log.WithValues("phase", "validateDatacenterConfig")
if err := r.validateTinkerbellIPMatch(ctx, tinkerbellScope.ClusterSpec); err != nil {
log.Error(err, "Invalid TinkerbellDatacenterConfig")
failureMessage := err.Error()
tinkerbellScope.ClusterSpec.Cluster.Status.FailureMessage = &failureMessage
return controller.ResultWithReturn(), nil
}
return controller.Result{}, nil
}
// ReconcileCNI reconciles the CNI to the desired state.
func (r *Reconciler) ReconcileCNI(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (controller.Result, error) {
clusterSpec := tinkerbellScope.ClusterSpec
log = log.WithValues("phase", "reconcileCNI")
client, err := r.remoteClientRegistry.GetClient(ctx, controller.CapiClusterObjectKey(clusterSpec.Cluster))
if err != nil {
return controller.Result{}, err
}
return r.cniReconciler.Reconcile(ctx, log, client, clusterSpec)
}
func (r *Reconciler) validateTinkerbellIPMatch(ctx context.Context, clusterSpec *c.Spec) error {
if clusterSpec.Cluster.IsManaged() {
// for workload cluster tinkerbell IP must match management cluster tinkerbell IP
managementClusterSpec := &anywherev1.Cluster{}
err := r.client.Get(ctx, client.ObjectKey{
Namespace: clusterSpec.Cluster.Namespace,
Name: clusterSpec.Cluster.Spec.ManagementCluster.Name,
}, managementClusterSpec)
if err != nil {
return err
}
managementDatacenterConfig := &anywherev1.TinkerbellDatacenterConfig{}
err = r.client.Get(ctx, client.ObjectKey{
Namespace: clusterSpec.Cluster.Namespace,
Name: managementClusterSpec.Spec.DatacenterRef.Name,
}, managementDatacenterConfig)
if err != nil {
return err
}
if clusterSpec.TinkerbellDatacenter.Spec.TinkerbellIP != managementDatacenterConfig.Spec.TinkerbellIP {
return errors.New("workload cluster Tinkerbell IP must match managment cluster Tinkerbell IP")
}
}
return nil
}
func toClientControlPlane(cp *tinkerbell.ControlPlane) *clusters.ControlPlane {
other := make([]client.Object, 0, 1)
if cp.Secrets != nil {
other = append(other, cp.Secrets)
}
return &clusters.ControlPlane{
Cluster: cp.Cluster,
ProviderCluster: cp.ProviderCluster,
KubeadmControlPlane: cp.KubeadmControlPlane,
ControlPlaneMachineTemplate: cp.ControlPlaneMachineTemplate,
EtcdCluster: cp.EtcdCluster,
EtcdMachineTemplate: cp.EtcdMachineTemplate,
Other: other,
}
}
// ValidateHardware performs a set of validations on the tinkerbell hardware read from the cluster.
func (r *Reconciler) ValidateHardware(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (controller.Result, error) {
clusterSpec := tinkerbellScope.ClusterSpec
log = log.WithValues("phase", "validateHardware")
// We need a new reader each time so that the catalogue gets recreated.
kubeReader := hardware.NewKubeReader(r.client)
if err := kubeReader.LoadHardware(ctx); err != nil {
log.Error(err, "Loading hardware failure")
failureMessage := err.Error()
clusterSpec.Cluster.Status.FailureMessage = &failureMessage
return controller.Result{}, err
}
var v tinkerbell.ClusterSpecValidator
v.Register(tinkerbell.HardwareSatisfiesOnlyOneSelectorAssertion(kubeReader.GetCatalogue()))
o, err := r.DetectOperation(ctx, log, tinkerbellScope)
if err != nil {
return controller.Result{}, err
}
switch o {
case K8sVersionUpgradeOperation:
v.Register(tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(kubeReader.GetCatalogue()))
case NewClusterOperation:
v.Register(tinkerbell.MinimumHardwareAvailableAssertionForCreate(kubeReader.GetCatalogue()))
case NoChange:
currentKCP, err := controller.GetKubeadmControlPlane(ctx, r.client, tinkerbellScope.ClusterSpec.Cluster)
if err != nil {
return controller.Result{}, err
}
var wgs []*clusterapi.WorkerGroup[*tinkerbellv1.TinkerbellMachineTemplate]
for _, wnc := range tinkerbellScope.ClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
md := &clusterv1.MachineDeployment{}
mdName := clusterapi.MachineDeploymentName(tinkerbellScope.ClusterSpec.Cluster, wnc)
key := types.NamespacedName{Namespace: constants.EksaSystemNamespace, Name: mdName}
err := r.client.Get(ctx, key, md)
if err == nil {
wgs = append(wgs, &clusterapi.WorkerGroup[*tinkerbellv1.TinkerbellMachineTemplate]{
MachineDeployment: md,
})
} else if !apierrors.IsNotFound(err) {
return controller.Result{}, err
}
}
validatableCAPI := &tinkerbell.ValidatableTinkerbellCAPI{
KubeadmControlPlane: currentKCP,
WorkerGroups: wgs,
}
v.Register(tinkerbell.AssertionsForScaleUpDown(kubeReader.GetCatalogue(), validatableCAPI, false))
}
tinkClusterSpec := tinkerbell.NewClusterSpec(
clusterSpec,
clusterSpec.Config.TinkerbellMachineConfigs,
clusterSpec.Config.TinkerbellDatacenter,
)
if err := v.Validate(tinkClusterSpec); err != nil {
log.Error(err, "Hardware validation failure")
failureMessage := fmt.Errorf("hardware validation failure: %v", err).Error()
clusterSpec.Cluster.Status.FailureMessage = &failureMessage
return controller.Result{}, err
}
return controller.Result{}, nil
}
// ValidateRufioMachines ensures the Contactable condition on all Rufio machines is True.
func (r *Reconciler) ValidateRufioMachines(ctx context.Context, log logr.Logger, tinkerbellScope *Scope) (controller.Result, error) {
clusterSpec := tinkerbellScope.ClusterSpec
log = log.WithValues("phase", "validateRufioMachines")
kubeReader := hardware.NewKubeReader(r.client)
if err := kubeReader.LoadRufioMachines(ctx); err != nil {
log.Error(err, "loading existing rufio machines from the cluster")
failureMessage := err.Error()
clusterSpec.Cluster.Status.FailureMessage = &failureMessage
return controller.Result{}, err
}
for _, rm := range kubeReader.GetCatalogue().AllBMCs() {
if err := r.checkContactable(rm); err != nil {
log.Error(err, "rufio machine check failure")
failureMessage := err.Error()
clusterSpec.Cluster.Status.FailureMessage = &failureMessage
return controller.Result{}, err
}
}
return controller.Result{}, nil
}
func (r *Reconciler) checkContactable(rm *rufiov1alpha1.Machine) error {
for _, c := range rm.Status.Conditions {
if c.Type == rufiov1alpha1.Contactable {
if c.Status == rufiov1alpha1.ConditionTrue {
return nil
}
if c.Status == rufiov1alpha1.ConditionFalse {
return errors.New(c.Message)
}
}
}
return nil
}
| 461 |
eks-anywhere | aws | Go | package reconciler_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
tinkerbellv1 "github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1"
rufiov1alpha1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
clusterspec "github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/reconciler"
tinkerbellreconcilermocks "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/reconciler/mocks"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
const (
workloadClusterName = "workload-cluster"
clusterNamespace = "test-namespace"
)
func TestReconcilerGenerateSpec(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
logger := test.NewNullLogger()
scope := tt.buildScope()
result, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(result).To(Equal(controller.Result{}))
tt.Expect(scope.ControlPlane).To(Equal(tinkerbellCP(workloadClusterName)))
tt.Expect(scope.Workers).To(Equal(tinkWorker(workloadClusterName)))
tt.cleanup()
}
func TestReconcilerReconcileSuccess(t *testing.T) {
tt := newReconcilerTest(t)
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = tt.cluster.Name
})
tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
tt.eksaSupportObjs = append(tt.eksaSupportObjs, tinkHardware("hw1", "cp"))
tt.eksaSupportObjs = append(tt.eksaSupportObjs, tinkHardware("hw2", "worker"))
tt.createAllObjs()
logger := test.NewNullLogger()
remoteClient := env.Client()
tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, tt.buildSpec()).Return(controller.Result{}, nil)
tt.remoteClientRegistry.EXPECT().GetClient(
tt.ctx, client.ObjectKey{Name: workloadClusterName, Namespace: constants.EksaSystemNamespace},
).Return(remoteClient, nil)
tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, tt.buildSpec())
result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(result).To(Equal(controller.Result{}))
tt.cleanup()
}
func TestReconcilerValidateDatacenterConfigSuccess(t *testing.T) {
tt := newReconcilerTest(t)
tt.withFakeClient()
result, err := tt.reconciler().ValidateDatacenterConfig(tt.ctx, test.NewNullLogger(), tt.buildScope())
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(result).To(Equal(controller.Result{}))
tt.cleanup()
}
func TestReconcilerValidateDatacenterConfigMissingManagementCluster(t *testing.T) {
tt := newReconcilerTest(t)
tt.cluster.Spec.ManagementCluster.Name = "nonexistent-management-cluster"
tt.withFakeClient()
result, err := tt.reconciler().ValidateDatacenterConfig(tt.ctx, test.NewNullLogger(), tt.buildScope())
tt.Expect(err).To(BeNil(), "error should be nil to prevent requeue")
tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation")
tt.Expect(tt.cluster.Status.FailureMessage).To(HaveValue(ContainSubstring("\"nonexistent-management-cluster\" not found")))
tt.cleanup()
}
func TestReconcilerValidateDatacenterConfigMissingManagementDatacenter(t *testing.T) {
tt := newReconcilerTest(t)
tt.managementCluster.Spec.DatacenterRef.Name = "nonexistent-datacenter"
tt.withFakeClient()
result, err := tt.reconciler().ValidateDatacenterConfig(tt.ctx, test.NewNullLogger(), tt.buildScope())
tt.Expect(err).To(BeNil(), "error should be nil to prevent requeue")
tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation")
tt.Expect(tt.cluster.Status.FailureMessage).To(HaveValue(ContainSubstring("\"nonexistent-datacenter\" not found")))
tt.cleanup()
}
func TestReconcilerValidateDatacenterConfigIpMismatch(t *testing.T) {
tt := newReconcilerTest(t)
managementDatacenterConfig := dataCenter(func(d *anywherev1.TinkerbellDatacenterConfig) {
d.Name = "ip-mismatch-datacenter"
d.Spec.TinkerbellIP = "3.3.3.3"
})
tt.managementCluster.Spec.DatacenterRef.Name = managementDatacenterConfig.Name
tt.eksaSupportObjs = append(tt.eksaSupportObjs, managementDatacenterConfig)
tt.withFakeClient()
result, err := tt.reconciler().ValidateDatacenterConfig(tt.ctx, test.NewNullLogger(), tt.buildScope())
tt.Expect(err).To(BeNil(), "error should be nil to prevent requeue")
tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation")
tt.Expect(tt.cluster.Status.FailureMessage).To(HaveValue(ContainSubstring("workload cluster Tinkerbell IP must match managment cluster Tinkerbell IP")))
tt.cleanup()
}
func TestReconcileCNISuccess(t *testing.T) {
tt := newReconcilerTest(t)
tt.withFakeClient()
logger := test.NewNullLogger()
remoteClient := fake.NewClientBuilder().Build()
scope := tt.buildScope()
tt.remoteClientRegistry.EXPECT().GetClient(
tt.ctx, client.ObjectKey{Name: workloadClusterName, Namespace: constants.EksaSystemNamespace},
).Return(remoteClient, nil)
tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, scope.ClusterSpec)
result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
tt.cleanup()
}
func TestReconcileCNIErrorClientRegistry(t *testing.T) {
tt := newReconcilerTest(t)
tt.withFakeClient()
logger := test.NewNullLogger()
spec := tt.buildScope()
tt.remoteClientRegistry.EXPECT().GetClient(
tt.ctx, client.ObjectKey{Name: workloadClusterName, Namespace: constants.EksaSystemNamespace},
).Return(nil, errors.New("building client"))
result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec)
tt.Expect(err).To(MatchError(ContainSubstring("building client")))
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
tt.cleanup()
}
func TestReconcilerReconcileControlPlaneScaleSuccess(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
scope := tt.buildScope()
scope.ClusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count = 2
logger := test.NewNullLogger()
_, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
_, err = tt.reconciler().DetectOperation(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
result, err := tt.reconciler().ReconcileControlPlane(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
kcp := &controlplanev1.KubeadmControlPlane{
ObjectMeta: metav1.ObjectMeta{
Name: workloadClusterName,
Namespace: constants.EksaSystemNamespace,
},
}
tt.ShouldEventuallyMatch(tt.ctx, kcp,
func(g Gomega) {
g.Expect(kcp.Spec.Replicas).To(HaveValue(BeEquivalentTo(2)))
})
tt.ShouldEventuallyExist(tt.ctx, controlPlaneMachineTemplate())
tt.cleanup()
}
func TestReconcilerReconcileControlPlaneSuccess(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
scope := tt.buildScope()
logger := test.NewNullLogger()
scope.ControlPlane = tinkerbellCP(workloadClusterName)
result, err := tt.reconciler().ReconcileControlPlane(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
tt.ShouldEventuallyExist(tt.ctx, kubeadmControlPlane())
tt.ShouldEventuallyExist(tt.ctx, controlPlaneMachineTemplate())
tt.ShouldEventuallyExist(tt.ctx, &tinkerbellv1.TinkerbellCluster{
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellCluster",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: workloadClusterName,
Namespace: constants.EksaSystemNamespace,
},
})
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = workloadClusterName
})
tt.ShouldEventuallyExist(tt.ctx, capiCluster)
tt.ShouldEventuallyNotExist(tt.ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "registry-credentials", Namespace: constants.EksaSystemNamespace}})
tt.cleanup()
}
func TestReconcilerReconcileControlPlaneSuccessRegistryMirrorAuthentication(t *testing.T) {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
tt := newReconcilerTest(t)
tt.createAllObjs()
scope := tt.buildScope()
scope.ClusterSpec.Cluster.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{
Authenticate: true,
Endpoint: "1.2.3.4",
Port: "65536",
}
logger := test.NewNullLogger()
_, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
_, err = tt.reconciler().DetectOperation(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
result, err := tt.reconciler().ReconcileControlPlane(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
tt.ShouldEventuallyExist(tt.ctx, kubeadmControlPlane())
tt.ShouldEventuallyExist(tt.ctx, controlPlaneMachineTemplate())
tt.ShouldEventuallyExist(tt.ctx, &tinkerbellv1.TinkerbellCluster{
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellCluster",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: workloadClusterName,
Namespace: constants.EksaSystemNamespace,
},
})
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = workloadClusterName
})
tt.ShouldEventuallyExist(tt.ctx, capiCluster)
tt.ShouldEventuallyExist(tt.ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "registry-credentials", Namespace: constants.EksaSystemNamespace}})
tt.cleanup()
}
func TestReconcilerReconcileControlPlaneFailure(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
scope := tt.buildScope()
scope.ClusterSpec.Cluster = scope.ClusterSpec.Cluster.DeepCopy()
scope.ClusterSpec.Cluster.Name = ""
logger := test.NewNullLogger()
_, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
_, err = tt.reconciler().ReconcileControlPlane(tt.ctx, logger, scope)
tt.Expect(err).To(MatchError(ContainSubstring("resource name may not be empty")))
tt.cleanup()
}
func TestReconcilerValidateClusterSpecInvalidDatacenterConfig(t *testing.T) {
tt := newReconcilerTest(t)
logger := test.NewNullLogger()
tt.cluster.Name = "invalidCluster"
tt.cluster.Spec.KubernetesVersion = "1.22"
tt.datacenterConfig.Spec.TinkerbellIP = ""
tt.withFakeClient()
tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, gomock.Any()).Return(controller.Result{}, nil)
result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster)
tt.Expect(err).To(BeNil(), "error should be nil to prevent requeue")
tt.Expect(*tt.cluster.Status.FailureMessage).To(ContainSubstring("missing spec.tinkerbellIP field"))
tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation")
tt.cleanup()
}
func TestReconcilerValidateClusterSpecInvalidOSFamily(t *testing.T) {
tt := newReconcilerTest(t)
logger := test.NewNullLogger()
tt.cluster.Name = "invalidCluster"
tt.machineConfigWorker.Spec.OSFamily = "invalidOS"
tt.withFakeClient()
tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, gomock.Any()).Return(controller.Result{}, nil)
result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster)
tt.Expect(err).To(BeNil(), "error should be nil to prevent requeue")
tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation")
tt.Expect(*tt.cluster.Status.FailureMessage).To(ContainSubstring("unsupported spec.osFamily (invalidOS); Please use one of the following: ubuntu, redhat, bottlerocket"))
tt.cleanup()
}
func TestReconcilerReconcileWorkerNodesSuccess(t *testing.T) {
tt := newReconcilerTest(t)
tt.cluster.Name = "mgmt-cluster"
tt.cluster.SetSelfManaged()
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = tt.cluster.Name
})
tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
tt.eksaSupportObjs = append(tt.eksaSupportObjs, tinkHardware("hw1", "cp"))
tt.eksaSupportObjs = append(tt.eksaSupportObjs, tinkHardware("hw2", "worker"))
tt.createAllObjs()
logger := test.NewNullLogger()
result, err := tt.reconciler().ReconcileWorkerNodes(tt.ctx, logger, tt.cluster)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
tt.ShouldEventuallyExist(tt.ctx,
&bootstrapv1.KubeadmConfigTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: capiCluster.Name + "-md-0-1",
Namespace: constants.EksaSystemNamespace,
},
},
)
tt.ShouldEventuallyExist(tt.ctx,
&tinkerbellv1.TinkerbellMachineTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: capiCluster.Name + "-md-0-1",
Namespace: constants.EksaSystemNamespace,
},
},
)
tt.ShouldEventuallyExist(tt.ctx,
&clusterv1.MachineDeployment{
ObjectMeta: metav1.ObjectMeta{
Name: capiCluster.Name + "-md-0",
Namespace: constants.EksaSystemNamespace,
},
},
)
tt.cleanup()
}
func TestReconcilerReconcileWorkersScaleSuccess(t *testing.T) {
tt := newReconcilerTest(t)
tt.cluster.Name = "mgmt-cluster"
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = tt.cluster.Name
})
tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
mt := &tinkerbellv1.TinkerbellMachineTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: tt.cluster.Name + "-md-0-1",
Namespace: constants.EksaSystemNamespace,
},
}
tt.createAllObjs()
scope := tt.buildScope()
scope.ClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(2)
logger := test.NewNullLogger()
scope.ControlPlane = tinkerbellCP(tt.cluster.Name)
scope.Workers = tinkWorker(tt.cluster.Name, func(w *tinkerbell.Workers) {
w.Groups[0].MachineDeployment.Spec.Replicas = ptr.Int32(2)
})
result, err := tt.reconciler().ReconcileWorkers(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
tt.ShouldEventuallyExist(tt.ctx, mt)
md := &clusterv1.MachineDeployment{
ObjectMeta: metav1.ObjectMeta{
Name: tt.cluster.Name + "-md-0",
Namespace: constants.EksaSystemNamespace,
},
Spec: clusterv1.MachineDeploymentSpec{
Replicas: ptr.Int32(2),
},
}
tt.ShouldEventuallyMatch(tt.ctx, md,
func(g Gomega) {
g.Expect(md.Spec.Replicas).To(HaveValue(BeEquivalentTo(2)))
})
tt.cleanup()
}
func TestReconcilerReconcileWorkersSuccess(t *testing.T) {
tt := newReconcilerTest(t)
tt.cluster.Name = "mgmt-cluster"
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = tt.cluster.Name
})
tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
tt.createAllObjs()
scope := tt.buildScope()
logger := test.NewNullLogger()
_, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
_, err = tt.reconciler().DetectOperation(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
result, err := tt.reconciler().ReconcileWorkers(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
tt.ShouldEventuallyExist(tt.ctx,
&clusterv1.MachineDeployment{
ObjectMeta: metav1.ObjectMeta{
Name: tt.cluster.Name + "-md-0",
Namespace: constants.EksaSystemNamespace,
},
},
)
tt.ShouldEventuallyExist(tt.ctx,
&bootstrapv1.KubeadmConfigTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: tt.cluster.Name + "-md-0-1",
Namespace: constants.EksaSystemNamespace,
},
},
)
tt.ShouldEventuallyExist(tt.ctx,
&tinkerbellv1.TinkerbellMachineTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: tt.cluster.Name + "-md-0-1",
Namespace: constants.EksaSystemNamespace,
},
},
)
tt.cleanup()
}
func TestReconcilerReconcileWorkerNodesFailure(t *testing.T) {
tt := newReconcilerTest(t)
tt.cluster.Name = "mgmt-cluster"
tt.cluster.SetSelfManaged()
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = tt.cluster.Name
})
tt.cluster.Spec.KubernetesVersion = ""
tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
tt.createAllObjs()
logger := test.NewNullLogger()
_, err := tt.reconciler().ReconcileWorkerNodes(tt.ctx, logger, tt.cluster)
tt.Expect(err).To(MatchError(ContainSubstring("building cluster Spec for worker node reconcile")))
tt.cleanup()
}
func TestReconcilerValidateHardwareCountNewClusterFail(t *testing.T) {
tt := newReconcilerTest(t)
logger := test.NewNullLogger()
tt.cluster.Name = "invalidCluster"
tt.eksaSupportObjs = append(tt.eksaSupportObjs, tinkHardware("hw1", "cp"))
tt.withFakeClient()
tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, gomock.Any()).Return(controller.Result{}, nil)
result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster)
tt.Expect(err).ToNot(BeNil())
tt.Expect(result).To(Equal(controller.Result{}), "result should not stop reconciliation")
tt.Expect(*tt.cluster.Status.FailureMessage).To(ContainSubstring("minimum hardware count not met for selector '{\"type\":\"worker\"}': have 0, require 1"))
tt.cleanup()
}
func TestReconcilerValidateHardwareCountRollingUpdateFail(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
logger := test.NewNullLogger()
scope := tt.buildScope()
scope.ClusterSpec.VersionsBundle.KubeDistro.Kubernetes.Tag = "1.23"
_, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
_, err = tt.reconciler().DetectOperation(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
result, err := tt.reconciler().ValidateHardware(tt.ctx, logger, scope)
tt.Expect(err).ToNot(BeNil())
tt.Expect(result).To(Equal(controller.Result{}), "result should not stop reconciliation")
tt.Expect(*tt.cluster.Status.FailureMessage).To(ContainSubstring("minimum hardware count not met for selector"))
tt.cleanup()
}
func TestReconcilerValidateHardwareScalingUpdateFail(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
logger := test.NewNullLogger()
scope := tt.buildScope()
scope.ClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(2)
_, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
op, err := tt.reconciler().DetectOperation(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(op).To(Equal(reconciler.NoChange))
result, err := tt.reconciler().ValidateHardware(tt.ctx, logger, scope)
tt.Expect(err).NotTo(BeNil())
tt.Expect(result).To(Equal(controller.Result{}), "result should not stop reconciliation")
tt.Expect(*tt.cluster.Status.FailureMessage).To(ContainSubstring("minimum hardware count not met for selector '{\"type\":\"worker\"}': have 0, require 1"))
tt.cleanup()
}
func TestReconcilerValidateHardwareNoHardware(t *testing.T) {
tt := newReconcilerTest(t)
logger := test.NewNullLogger()
tt.cluster.Name = "invalidCluster"
tt.eksaSupportObjs = append(tt.eksaSupportObjs, tinkHardware("hw1", "worker"))
tt.withFakeClient()
tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, gomock.Any()).Return(controller.Result{}, nil)
result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster)
tt.Expect(err).NotTo(BeNil())
tt.Expect(result).To(Equal(controller.Result{}), "result should not stop reconciliation")
tt.Expect(*tt.cluster.Status.FailureMessage).To(ContainSubstring("minimum hardware count not met for selector '{\"type\":\"cp\"}': have 0, require 1"))
tt.cleanup()
}
func TestReconcilerValidateRufioMachinesFail(t *testing.T) {
tt := newReconcilerTest(t)
logger := test.NewNullLogger()
tt.cluster.Name = "invalidCluster"
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = tt.cluster.Name
})
tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
tt.eksaSupportObjs = append(tt.eksaSupportObjs, &rufiov1alpha1.Machine{
ObjectMeta: metav1.ObjectMeta{
Name: "bmc0",
Namespace: constants.EksaSystemNamespace,
},
})
tt.eksaSupportObjs = append(tt.eksaSupportObjs, &rufiov1alpha1.Machine{
ObjectMeta: metav1.ObjectMeta{
Name: "bmc1",
Namespace: constants.EksaSystemNamespace,
},
Status: rufiov1alpha1.MachineStatus{
Conditions: []rufiov1alpha1.MachineCondition{
{
Type: rufiov1alpha1.Contactable,
Status: rufiov1alpha1.ConditionTrue,
},
},
},
})
tt.eksaSupportObjs = append(tt.eksaSupportObjs, &rufiov1alpha1.Machine{
ObjectMeta: metav1.ObjectMeta{
Name: "bmc2",
Namespace: constants.EksaSystemNamespace,
},
Status: rufiov1alpha1.MachineStatus{
Conditions: []rufiov1alpha1.MachineCondition{
{
Type: rufiov1alpha1.Contactable,
Status: rufiov1alpha1.ConditionFalse,
Message: "bmc connection failure",
},
},
},
})
tt.eksaSupportObjs = append(tt.eksaSupportObjs, tinkHardware("hw1", "cp"))
tt.eksaSupportObjs = append(tt.eksaSupportObjs, tinkHardware("hw2", "worker"))
tt.withFakeClient()
tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, gomock.Any()).Return(controller.Result{}, nil)
result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster)
tt.Expect(err).ToNot(BeNil())
tt.Expect(result).To(Equal(controller.Result{}), "result should not stop reconciliation")
tt.Expect(*tt.cluster.Status.FailureMessage).To(ContainSubstring("bmc connection failure"))
tt.cleanup()
}
func TestReconcilerDetectOperationK8sVersionUpgrade(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
logger := test.NewNullLogger()
scope := tt.buildScope()
scope.ClusterSpec.VersionsBundle.KubeDistro.Kubernetes.Tag = "1.23"
_, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
op, err := tt.reconciler().DetectOperation(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(op).To(Equal(reconciler.K8sVersionUpgradeOperation))
tt.cleanup()
}
func TestReconcilerDetectOperationExistingWorkerNodeGroupScaleUpdate(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
logger := test.NewNullLogger()
scope := tt.buildScope()
scope.ClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(2)
_, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
op, err := tt.reconciler().DetectOperation(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(op).To(Equal(reconciler.NoChange))
tt.cleanup()
}
func TestReconcilerDetectOperationNewWorkerNodeGroupScaleUpdate(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
logger := test.NewNullLogger()
scope := tt.buildScope()
scope.ClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations = append(scope.ClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations,
anywherev1.WorkerNodeGroupConfiguration{
Count: ptr.Int(1),
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.TinkerbellMachineConfigKind,
Name: tt.machineConfigWorker.Name,
},
Name: "md-1",
Labels: nil,
},
)
_, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
op, err := tt.reconciler().DetectOperation(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(op).To(Equal(reconciler.NoChange))
tt.cleanup()
}
func TestReconcilerDetectOperationNoChanges(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
logger := test.NewNullLogger()
scope := tt.buildScope()
_, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
op, err := tt.reconciler().DetectOperation(tt.ctx, logger, scope)
tt.Expect(err).To(BeNil())
tt.Expect(op).To(Equal(reconciler.NoChange))
tt.cleanup()
}
func TestReconcilerDetectOperationNewCluster(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
logger := test.NewNullLogger()
scope := tt.buildScope()
scope.ClusterSpec.Cluster.Name = "new-cluster"
_, err := tt.reconciler().GenerateSpec(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
op, err := tt.reconciler().DetectOperation(tt.ctx, logger, scope)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(op).To(Equal(reconciler.NewClusterOperation))
tt.cleanup()
}
func TestReconcilerDetectOperationFail(t *testing.T) {
tt := newReconcilerTest(t)
tt.client = fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build()
_, err := tt.reconciler().DetectOperation(tt.ctx, test.NewNullLogger(), &reconciler.Scope{ClusterSpec: &clusterspec.Spec{Config: &clusterspec.Config{Cluster: &anywherev1.Cluster{}}}})
tt.Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type")))
}
func (tt *reconcilerTest) withFakeClient() {
tt.client = fake.NewClientBuilder().WithObjects(clientutil.ObjectsToClientObjects(tt.allObjs())...).Build()
}
func (tt *reconcilerTest) reconciler() *reconciler.Reconciler {
return reconciler.New(tt.client, tt.cniReconciler, tt.remoteClientRegistry, tt.ipValidator)
}
func (tt *reconcilerTest) buildScope() *reconciler.Scope {
tt.t.Helper()
spec, err := clusterspec.BuildSpec(tt.ctx, clientutil.NewKubeClient(tt.client), tt.cluster)
tt.Expect(err).NotTo(HaveOccurred())
return reconciler.NewScope(spec)
}
func (tt *reconcilerTest) buildSpec() *clusterspec.Spec {
tt.t.Helper()
spec, err := clusterspec.BuildSpec(tt.ctx, clientutil.NewKubeClient(tt.client), tt.cluster)
tt.Expect(err).NotTo(HaveOccurred())
return spec
}
func (tt *reconcilerTest) createAllObjs() {
tt.t.Helper()
envtest.CreateObjs(tt.ctx, tt.t, tt.client, tt.allObjs()...)
}
func (tt *reconcilerTest) allObjs() []client.Object {
objs := make([]client.Object, 0, len(tt.eksaSupportObjs)+3)
objs = append(objs, tt.eksaSupportObjs...)
objs = append(objs, tt.cluster, tt.machineConfigControlPlane, tt.machineConfigWorker, tt.managementCluster)
return objs
}
type reconcilerTest struct {
t testing.TB
*WithT
*envtest.APIExpecter
ctx context.Context
cluster *anywherev1.Cluster
managementCluster *anywherev1.Cluster
client client.Client
eksaSupportObjs []client.Object
datacenterConfig *anywherev1.TinkerbellDatacenterConfig
machineConfigControlPlane *anywherev1.TinkerbellMachineConfig
machineConfigWorker *anywherev1.TinkerbellMachineConfig
ipValidator *tinkerbellreconcilermocks.MockIPValidator
cniReconciler *tinkerbellreconcilermocks.MockCNIReconciler
remoteClientRegistry *tinkerbellreconcilermocks.MockRemoteClientRegistry
}
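// newReconcilerTest wires the envtest client, the gomock-based CNI reconciler, remote client
// registry and IP validator mocks, and the default management/workload cluster objects shared
// by the tests above.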
func newReconcilerTest(t testing.TB) *reconcilerTest {
ctrl := gomock.NewController(t)
c := env.Client()
cniReconciler := tinkerbellreconcilermocks.NewMockCNIReconciler(ctrl)
remoteClientRegistry := tinkerbellreconcilermocks.NewMockRemoteClientRegistry(ctrl)
ipValidator := tinkerbellreconcilermocks.NewMockIPValidator(ctrl)
bundle := test.Bundle()
managementClusterDatacenter := dataCenter(func(d *anywherev1.TinkerbellDatacenterConfig) {
d.Name = "management-datacenter"
})
managementCluster := tinkerbellCluster(func(c *anywherev1.Cluster) {
c.Name = "management-cluster"
c.Spec.ManagementCluster = anywherev1.ManagementCluster{
Name: c.Name,
}
c.Spec.BundlesRef = &anywherev1.BundlesRef{
Name: bundle.Name,
Namespace: bundle.Namespace,
APIVersion: bundle.APIVersion,
}
c.Spec.DatacenterRef = anywherev1.Ref{
Kind: anywherev1.TinkerbellDatacenterKind,
Name: managementClusterDatacenter.Name,
}
})
machineConfigCP := machineConfig(func(m *anywherev1.TinkerbellMachineConfig) {
m.Name = "cp-machine-config"
m.Spec.HardwareSelector = anywherev1.HardwareSelector{"type": "cp"}
})
machineConfigWN := machineConfig(func(m *anywherev1.TinkerbellMachineConfig) {
m.Name = "worker-machine-config"
m.Spec.HardwareSelector = anywherev1.HardwareSelector{"type": "worker"}
})
workloadClusterDatacenter := dataCenter(func(d *anywherev1.TinkerbellDatacenterConfig) {})
cluster := tinkerbellCluster(func(c *anywherev1.Cluster) {
c.Name = workloadClusterName
c.Spec.ManagementCluster = anywherev1.ManagementCluster{
Name: managementCluster.Name,
}
c.Spec.BundlesRef = &anywherev1.BundlesRef{
Name: bundle.Name,
Namespace: bundle.Namespace,
APIVersion: bundle.APIVersion,
}
c.Spec.ControlPlaneConfiguration = anywherev1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &anywherev1.Endpoint{
Host: "1.1.1.1",
},
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.TinkerbellMachineConfigKind,
Name: machineConfigCP.Name,
},
}
c.Spec.DatacenterRef = anywherev1.Ref{
Kind: anywherev1.TinkerbellDatacenterKind,
Name: workloadClusterDatacenter.Name,
}
c.Spec.WorkerNodeGroupConfigurations = append(c.Spec.WorkerNodeGroupConfigurations,
anywherev1.WorkerNodeGroupConfiguration{
Count: ptr.Int(1),
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.TinkerbellMachineConfigKind,
Name: machineConfigWN.Name,
},
Name: "md-0",
Labels: nil,
},
)
})
tt := &reconcilerTest{
t: t,
WithT: NewWithT(t),
APIExpecter: envtest.NewAPIExpecter(t, c),
ctx: context.Background(),
ipValidator: ipValidator,
cniReconciler: cniReconciler,
remoteClientRegistry: remoteClientRegistry,
client: c,
eksaSupportObjs: []client.Object{
test.Namespace(clusterNamespace),
test.Namespace(constants.EksaSystemNamespace),
workloadClusterDatacenter,
managementClusterDatacenter,
bundle,
test.EksdRelease(),
},
cluster: cluster,
managementCluster: managementCluster,
datacenterConfig: workloadClusterDatacenter,
machineConfigControlPlane: machineConfigCP,
machineConfigWorker: machineConfigWN,
}
t.Cleanup(tt.cleanup)
return tt
}
func (tt *reconcilerTest) cleanup() {
tt.DeleteAndWait(tt.ctx, tt.allObjs()...)
tt.DeleteAllOfAndWait(tt.ctx, &bootstrapv1.KubeadmConfigTemplate{})
tt.DeleteAllOfAndWait(tt.ctx, &clusterv1.Cluster{})
tt.DeleteAllOfAndWait(tt.ctx, &controlplanev1.KubeadmControlPlane{})
tt.DeleteAllOfAndWait(tt.ctx, &tinkerbellv1.TinkerbellCluster{})
tt.DeleteAllOfAndWait(tt.ctx, &tinkerbellv1.TinkerbellMachineTemplate{})
tt.DeleteAllOfAndWait(tt.ctx, &clusterv1.MachineDeployment{})
}
type clusterOpt func(*anywherev1.Cluster)
func tinkerbellCluster(opts ...clusterOpt) *anywherev1.Cluster {
c := &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Namespace: clusterNamespace,
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "1.22",
ClusterNetwork: anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{"0.0.0.0"},
},
Services: anywherev1.Services{
CidrBlocks: []string{"0.0.0.0"},
},
},
},
}
for _, opt := range opts {
opt(c)
}
return c
}
type datacenterOpt func(config *anywherev1.TinkerbellDatacenterConfig)
func dataCenter(opts ...datacenterOpt) *anywherev1.TinkerbellDatacenterConfig {
d := &anywherev1.TinkerbellDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.TinkerbellDatacenterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "datacenter",
Namespace: clusterNamespace,
},
Spec: anywherev1.TinkerbellDatacenterConfigSpec{
TinkerbellIP: "2.2.2.2",
},
}
for _, opt := range opts {
opt(d)
}
return d
}
type tinkerbellMachineOpt func(config *anywherev1.TinkerbellMachineConfig)
func machineConfig(opts ...tinkerbellMachineOpt) *anywherev1.TinkerbellMachineConfig {
m := &anywherev1.TinkerbellMachineConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.TinkerbellMachineConfigKind,
APIVersion: anywherev1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Namespace: clusterNamespace,
},
Spec: anywherev1.TinkerbellMachineConfigSpec{
OSFamily: "bottlerocket",
HardwareSelector: anywherev1.HardwareSelector{
"key": "cp",
},
Users: []anywherev1.UserConfiguration{
{
Name: "user",
SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8ZEibIrz1AUBKDvmDiWLs9f5DnOerC4qPITiDtSOuPAsxgZbRMavBfVTxodMdAkYRYlXxK6PqNo0ve0qcOV2yvpxH1OogasMMetck6BlM/dIoo3vEY4ZoG9DuVRIf9Iry5gJKbpMDYWpx1IGZrDMOFcIM20ii2qLQQk5hfq9OqdqhToEJFixdgJt/y/zt6Koy3kix+XsnrVdAHgWAq4CZuwt1G6JUAqrpob3H8vPmL7aS+35ktf0pHBm6nYoxRhslnWMUb/7vpzWiq+fUBIm2LYqvrnm7t3fRqFx7p2sZqAm2jDNivyYXwRXkoQPR96zvGeMtuQ5BVGPpsDfVudSW21+pEXHI0GINtTbua7Ogz7wtpVywSvHraRgdFOeY9mkXPzvm2IhoqNrteck2GErwqSqb19mPz6LnHueK0u7i6WuQWJn0CUoCtyMGIrowXSviK8qgHXKrmfTWATmCkbtosnLskNdYuOw8bKxq5S4WgdQVhPps2TiMSZ bottlerocket@ip-10-2-0-6"},
},
},
},
}
for _, opt := range opts {
opt(m)
}
return m
}
func kubeadmControlPlane() *controlplanev1.KubeadmControlPlane {
return &controlplanev1.KubeadmControlPlane{
ObjectMeta: metav1.ObjectMeta{
Name: workloadClusterName,
Namespace: constants.EksaSystemNamespace,
},
}
}
func controlPlaneMachineTemplate() *tinkerbellv1.TinkerbellMachineTemplate {
return &tinkerbellv1.TinkerbellMachineTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: workloadClusterName + "-control-plane-1",
Namespace: constants.EksaSystemNamespace,
},
Spec: tinkerbellv1.TinkerbellMachineTemplateSpec{
Template: tinkerbellv1.TinkerbellMachineTemplateResource{
Spec: tinkerbellv1.TinkerbellMachineSpec{
HardwareAffinity: &tinkerbellv1.HardwareAffinity{
Required: []tinkerbellv1.HardwareAffinityTerm{
{
LabelSelector: metav1.LabelSelector{MatchLabels: map[string]string{}},
},
},
},
},
},
},
}
}
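// tinkHardware returns a Hardware object labeled with the given type ("cp" or "worker") so it
// matches the hardware selectors used by the machine configs in these tests.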
func tinkHardware(hardwareName, labelType string) *tinkv1alpha1.Hardware {
return &tinkv1alpha1.Hardware{
TypeMeta: metav1.TypeMeta{
Kind: "Hardware",
APIVersion: "tinkerbell.org/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: hardwareName,
Namespace: constants.EksaSystemNamespace,
Labels: map[string]string{
"type": labelType,
},
},
Spec: tinkv1alpha1.HardwareSpec{
Metadata: &tinkv1alpha1.HardwareMetadata{
Instance: &tinkv1alpha1.MetadataInstance{
ID: "foo",
},
},
},
}
}
type cpOpt func(plane *tinkerbell.ControlPlane)
func tinkerbellCP(clusterName string, opts ...cpOpt) *tinkerbell.ControlPlane {
cp := &tinkerbell.ControlPlane{
BaseControlPlane: tinkerbell.BaseControlPlane{
Cluster: &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: constants.EksaSystemNamespace,
Labels: map[string]string{"cluster.x-k8s.io/cluster-name": workloadClusterName},
},
Spec: clusterv1.ClusterSpec{
ClusterNetwork: &clusterv1.ClusterNetwork{
Services: &clusterv1.NetworkRanges{
CIDRBlocks: []string{"0.0.0.0"},
},
Pods: &clusterv1.NetworkRanges{
CIDRBlocks: []string{"0.0.0.0"},
},
},
ControlPlaneEndpoint: clusterv1.APIEndpoint{
Host: "1.1.1.1",
Port: 6443,
},
ControlPlaneRef: &corev1.ObjectReference{
Kind: "KubeadmControlPlane",
Name: clusterName,
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
},
InfrastructureRef: &corev1.ObjectReference{
Kind: "TinkerbellCluster",
Name: clusterName,
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
},
Status: clusterv1.ClusterStatus{},
},
KubeadmControlPlane: &controlplanev1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmControlPlane",
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: constants.EksaSystemNamespace,
},
Spec: controlplanev1.KubeadmControlPlaneSpec{
Replicas: ptr.Int32(1),
Version: "v1.19.8",
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: corev1.ObjectReference{
Kind: "TinkerbellMachineTemplate",
Name: "workload-cluster-control-plane-1",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
},
KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
ImageRepository: "public.ecr.aws/eks-distro/kubernetes",
Etcd: bootstrapv1.Etcd{
Local: &bootstrapv1.LocalEtcd{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "",
ImageTag: "",
},
DataDir: "",
ExtraArgs: nil,
ServerCertSANs: nil,
PeerCertSANs: nil,
},
},
ControllerManager: bootstrapv1.ControlPlaneComponent{
ExtraVolumes: []bootstrapv1.HostPathMount{
{
Name: "kubeconfig",
HostPath: "/var/lib/kubeadm/controller-manager.conf",
MountPath: "/etc/kubernetes/controller-manager.conf",
ReadOnly: true,
PathType: "File",
},
},
},
Scheduler: bootstrapv1.ControlPlaneComponent{
ExtraVolumes: []bootstrapv1.HostPathMount{
{
Name: "kubeconfig",
HostPath: "/var/lib/kubeadm/scheduler.conf",
MountPath: "/etc/kubernetes/scheduler.conf",
ReadOnly: true,
PathType: "File",
},
},
},
CertificatesDir: "/var/lib/kubeadm/pki",
},
InitConfiguration: &bootstrapv1.InitConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"read-only-port": "0",
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"anonymous-auth": "false",
"provider-id": "PROVIDER_ID",
},
},
},
JoinConfiguration: &bootstrapv1.JoinConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"anonymous-auth": "false",
"provider-id": "PROVIDER_ID",
"read-only-port": "0",
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
},
IgnorePreflightErrors: []string{"DirAvailable--etc-kubernetes-manifests"},
},
CACertPath: "",
Discovery: bootstrapv1.Discovery{},
ControlPlane: nil,
SkipPhases: nil,
Patches: nil,
BottlerocketCustomHostContainers: nil,
BottlerocketCustomBootstrapContainers: nil,
},
Files: []bootstrapv1.File{
{
Path: "/etc/kubernetes/manifests/kube-vip.yaml",
Owner: "root:root",
Permissions: "",
Encoding: "",
Append: false,
Content: "apiVersion: v1\nkind: Pod\nmetadata:\n creationTimestamp: null\n name: kube-vip\n namespace: kube-system\nspec:\n containers:\n - args:\n - manager\n env:\n - name: vip_arp\n value: \"true\"\n - name: port\n value: \"6443\"\n - name: vip_cidr\n value: \"32\"\n - name: cp_enable\n value: \"true\"\n - name: cp_namespace\n value: kube-system\n - name: vip_ddns\n value: \"false\"\n - name: vip_leaderelection\n value: \"true\"\n - name: vip_leaseduration\n value: \"15\"\n - name: vip_renewdeadline\n value: \"10\"\n - name: vip_retryperiod\n value: \"2\"\n - name: address\n value: 1.1.1.1\n image: \n imagePullPolicy: IfNotPresent\n name: kube-vip\n resources: {}\n securityContext:\n capabilities:\n add:\n - NET_ADMIN\n - NET_RAW\n volumeMounts:\n - mountPath: /etc/kubernetes/admin.conf\n name: kubeconfig\n hostNetwork: true\n volumes:\n - hostPath:\n path: /etc/kubernetes/admin.conf\n name: kubeconfig\nstatus: {}\n",
ContentFrom: nil,
},
},
Users: []bootstrapv1.User{
{
Name: "user",
Sudo: ptr.String("ALL=(ALL) NOPASSWD:ALL"),
SSHAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8ZEibIrz1AUBKDvmDiWLs9f5DnOerC4qPITiDtSOuPAsxgZbRMavBfVTxodMdAkYRYlXxK6PqNo0ve0qcOV2yvpxH1OogasMMetck6BlM/dIoo3vEY4ZoG9DuVRIf9Iry5gJKbpMDYWpx1IGZrDMOFcIM20ii2qLQQk5hfq9OqdqhToEJFixdgJt/y/zt6Koy3kix+XsnrVdAHgWAq4CZuwt1G6JUAqrpob3H8vPmL7aS+35ktf0pHBm6nYoxRhslnWMUb/7vpzWiq+fUBIm2LYqvrnm7t3fRqFx7p2sZqAm2jDNivyYXwRXkoQPR96zvGeMtuQ5BVGPpsDfVudSW21+pEXHI0GINtTbua7Ogz7wtpVywSvHraRgdFOeY9mkXPzvm2IhoqNrteck2GErwqSqb19mPz6LnHueK0u7i6WuQWJn0CUoCtyMGIrowXSviK8qgHXKrmfTWATmCkbtosnLskNdYuOw8bKxq5S4WgdQVhPps2TiMSZ bottlerocket@ip-10-2-0-6"},
},
},
Format: "bottlerocket",
},
},
},
ProviderCluster: &tinkerbellv1.TinkerbellCluster{
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellCluster",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: constants.EksaSystemNamespace,
},
Spec: tinkerbellv1.TinkerbellClusterSpec{
ImageLookupFormat: "--kube-v1.19.8.raw.gz",
ImageLookupBaseRegistry: "/",
},
},
ControlPlaneMachineTemplate: &tinkerbellv1.TinkerbellMachineTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellMachineTemplate",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "workload-cluster-control-plane-1",
Namespace: constants.EksaSystemNamespace,
},
Spec: tinkerbellv1.TinkerbellMachineTemplateSpec{
Template: tinkerbellv1.TinkerbellMachineTemplateResource{
Spec: tinkerbellv1.TinkerbellMachineSpec{
TemplateOverride: "global_timeout: 6000\nid: \"\"\nname: workload-cluster\ntasks:\n- actions:\n - environment:\n COMPRESSED: \"true\"\n DEST_DISK: '{{ index .Hardware.Disks 0 }}'\n IMG_URL: \"\"\n image: \"\"\n name: stream-image\n timeout: 600\n - environment:\n BOOTCONFIG_CONTENTS: kernel {}\n DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 12 }}'\n DEST_PATH: /bootconfig.data\n DIRMODE: \"0700\"\n FS_TYPE: ext4\n GID: \"0\"\n MODE: \"0644\"\n UID: \"0\"\n image: \"\"\n name: write-bootconfig\n pid: host\n timeout: 90\n - environment:\n DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 12 }}'\n DEST_PATH: /user-data.toml\n DIRMODE: \"0700\"\n FS_TYPE: ext4\n GID: \"0\"\n HEGEL_URLS: http://2.2.2.2:50061,http://2.2.2.2:50061\n MODE: \"0644\"\n UID: \"0\"\n image: \"\"\n name: write-user-data\n pid: host\n timeout: 90\n - environment:\n DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 12 }}'\n DEST_PATH: /net.toml\n DIRMODE: \"0755\"\n FS_TYPE: ext4\n GID: \"0\"\n IFNAME: eno1\n MODE: \"0644\"\n STATIC_BOTTLEROCKET: \"true\"\n UID: \"0\"\n image: \"\"\n name: write-netplan\n pid: host\n timeout: 90\n - image: \"\"\n name: reboot-image\n pid: host\n timeout: 90\n volumes:\n - /worker:/worker\n name: workload-cluster\n volumes:\n - /dev:/dev\n - /dev/console:/dev/console\n - /lib/firmware:/lib/firmware:ro\n worker: '{{.device_1}}'\nversion: \"0.1\"\n",
HardwareAffinity: &tinkerbellv1.HardwareAffinity{
Required: []tinkerbellv1.HardwareAffinityTerm{
{LabelSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"type": "cp"},
}},
},
},
},
},
},
},
},
}
for _, opt := range opts {
opt(cp)
}
return cp
}
type workerOpt func(*tinkerbell.Workers)
func tinkWorker(clusterName string, opts ...workerOpt) *tinkerbell.Workers {
w := &tinkerbell.Workers{
Groups: []clusterapi.WorkerGroup[*tinkerbellv1.TinkerbellMachineTemplate]{
{
KubeadmConfigTemplate: &bootstrapv1.KubeadmConfigTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmConfigTemplate",
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName + "-md-0-1",
Namespace: constants.EksaSystemNamespace,
},
Spec: bootstrapv1.KubeadmConfigTemplateSpec{
Template: bootstrapv1.KubeadmConfigTemplateResource{
Spec: bootstrapv1.KubeadmConfigSpec{
Users: []bootstrapv1.User{
{
Name: "user",
Sudo: ptr.String("ALL=(ALL) NOPASSWD:ALL"),
SSHAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8ZEibIrz1AUBKDvmDiWLs9f5DnOerC4qPITiDtSOuPAsxgZbRMavBfVTxodMdAkYRYlXxK6PqNo0ve0qcOV2yvpxH1OogasMMetck6BlM/dIoo3vEY4ZoG9DuVRIf9Iry5gJKbpMDYWpx1IGZrDMOFcIM20ii2qLQQk5hfq9OqdqhToEJFixdgJt/y/zt6Koy3kix+XsnrVdAHgWAq4CZuwt1G6JUAqrpob3H8vPmL7aS+35ktf0pHBm6nYoxRhslnWMUb/7vpzWiq+fUBIm2LYqvrnm7t3fRqFx7p2sZqAm2jDNivyYXwRXkoQPR96zvGeMtuQ5BVGPpsDfVudSW21+pEXHI0GINtTbua7Ogz7wtpVywSvHraRgdFOeY9mkXPzvm2IhoqNrteck2GErwqSqb19mPz6LnHueK0u7i6WuQWJn0CUoCtyMGIrowXSviK8qgHXKrmfTWATmCkbtosnLskNdYuOw8bKxq5S4WgdQVhPps2TiMSZ bottlerocket@ip-10-2-0-6"},
},
},
Format: "bottlerocket",
JoinConfiguration: &bootstrapv1.JoinConfiguration{
Pause: bootstrapv1.Pause{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "",
ImageTag: "",
},
},
BottlerocketBootstrap: bootstrapv1.BottlerocketBootstrap{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "",
ImageTag: "",
},
},
BottlerocketAdmin: bootstrapv1.BottlerocketAdmin{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "",
ImageTag: "",
},
},
BottlerocketControl: bootstrapv1.BottlerocketControl{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "",
ImageTag: "",
},
},
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"anonymous-auth": "false",
"provider-id": "PROVIDER_ID",
"read-only-port": "0",
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
},
},
},
},
},
},
},
MachineDeployment: &clusterv1.MachineDeployment{
TypeMeta: metav1.TypeMeta{
Kind: "MachineDeployment",
APIVersion: "cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName + "-md-0",
Namespace: constants.EksaSystemNamespace,
Labels: map[string]string{
"pool": "md-0",
"cluster.x-k8s.io/cluster-name": clusterName,
},
},
Spec: clusterv1.MachineDeploymentSpec{
ClusterName: workloadClusterName,
Replicas: ptr.Int32(1),
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{},
},
Template: clusterv1.MachineTemplateSpec{
ObjectMeta: clusterv1.ObjectMeta{
Labels: map[string]string{
"pool": "md-0",
"cluster.x-k8s.io/cluster-name": clusterName,
},
},
Spec: clusterv1.MachineSpec{
ClusterName: clusterName,
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &corev1.ObjectReference{
Kind: "KubeadmConfigTemplate",
Name: clusterName + "-md-0-1",
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
},
},
InfrastructureRef: corev1.ObjectReference{
Kind: "TinkerbellMachineTemplate",
Name: clusterName + "-md-0-1",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
Version: ptr.String("v1.19.8"),
},
},
},
},
ProviderMachineTemplate: &tinkerbellv1.TinkerbellMachineTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellMachineTemplate",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName + "-md-0-1",
Namespace: constants.EksaSystemNamespace,
},
Spec: tinkerbellv1.TinkerbellMachineTemplateSpec{
Template: tinkerbellv1.TinkerbellMachineTemplateResource{Spec: tinkerbellv1.TinkerbellMachineSpec{
TemplateOverride: "global_timeout: 6000\nid: \"\"\nname: " + clusterName + "\ntasks:\n- actions:\n - environment:\n COMPRESSED: \"true\"\n DEST_DISK: '{{ index .Hardware.Disks 0 }}'\n IMG_URL: \"\"\n image: \"\"\n name: stream-image\n timeout: 600\n - environment:\n BOOTCONFIG_CONTENTS: kernel {}\n DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 12 }}'\n DEST_PATH: /bootconfig.data\n DIRMODE: \"0700\"\n FS_TYPE: ext4\n GID: \"0\"\n MODE: \"0644\"\n UID: \"0\"\n image: \"\"\n name: write-bootconfig\n pid: host\n timeout: 90\n - environment:\n DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 12 }}'\n DEST_PATH: /user-data.toml\n DIRMODE: \"0700\"\n FS_TYPE: ext4\n GID: \"0\"\n HEGEL_URLS: http://2.2.2.2:50061,http://2.2.2.2:50061\n MODE: \"0644\"\n UID: \"0\"\n image: \"\"\n name: write-user-data\n pid: host\n timeout: 90\n - environment:\n DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 12 }}'\n DEST_PATH: /net.toml\n DIRMODE: \"0755\"\n FS_TYPE: ext4\n GID: \"0\"\n IFNAME: eno1\n MODE: \"0644\"\n STATIC_BOTTLEROCKET: \"true\"\n UID: \"0\"\n image: \"\"\n name: write-netplan\n pid: host\n timeout: 90\n - image: \"\"\n name: reboot-image\n pid: host\n timeout: 90\n volumes:\n - /worker:/worker\n name: workload-cluster\n volumes:\n - /dev:/dev\n - /dev/console:/dev/console\n - /lib/firmware:/lib/firmware:ro\n worker: '{{.device_1}}'\nversion: \"0.1\"\n",
HardwareAffinity: &tinkerbellv1.HardwareAffinity{
Required: []tinkerbellv1.HardwareAffinityTerm{
{
LabelSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"type": "worker"},
},
},
},
},
}},
},
},
},
},
}
for _, opt := range opts {
opt(w)
}
return w
}
| 1,376 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/providers/tinkerbell/reconciler/reconciler.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
cluster "github.com/aws/eks-anywhere/pkg/cluster"
controller "github.com/aws/eks-anywhere/pkg/controller"
logr "github.com/go-logr/logr"
gomock "github.com/golang/mock/gomock"
client "sigs.k8s.io/controller-runtime/pkg/client"
)
// MockCNIReconciler is a mock of CNIReconciler interface.
type MockCNIReconciler struct {
ctrl *gomock.Controller
recorder *MockCNIReconcilerMockRecorder
}
// MockCNIReconcilerMockRecorder is the mock recorder for MockCNIReconciler.
type MockCNIReconcilerMockRecorder struct {
mock *MockCNIReconciler
}
// NewMockCNIReconciler creates a new mock instance.
func NewMockCNIReconciler(ctrl *gomock.Controller) *MockCNIReconciler {
mock := &MockCNIReconciler{ctrl: ctrl}
mock.recorder = &MockCNIReconcilerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCNIReconciler) EXPECT() *MockCNIReconcilerMockRecorder {
return m.recorder
}
// Reconcile mocks base method.
func (m *MockCNIReconciler) Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (controller.Result, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reconcile", ctx, logger, client, spec)
ret0, _ := ret[0].(controller.Result)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Reconcile indicates an expected call of Reconcile.
func (mr *MockCNIReconcilerMockRecorder) Reconcile(ctx, logger, client, spec interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockCNIReconciler)(nil).Reconcile), ctx, logger, client, spec)
}
// MockRemoteClientRegistry is a mock of RemoteClientRegistry interface.
type MockRemoteClientRegistry struct {
ctrl *gomock.Controller
recorder *MockRemoteClientRegistryMockRecorder
}
// MockRemoteClientRegistryMockRecorder is the mock recorder for MockRemoteClientRegistry.
type MockRemoteClientRegistryMockRecorder struct {
mock *MockRemoteClientRegistry
}
// NewMockRemoteClientRegistry creates a new mock instance.
func NewMockRemoteClientRegistry(ctrl *gomock.Controller) *MockRemoteClientRegistry {
mock := &MockRemoteClientRegistry{ctrl: ctrl}
mock.recorder = &MockRemoteClientRegistryMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockRemoteClientRegistry) EXPECT() *MockRemoteClientRegistryMockRecorder {
return m.recorder
}
// GetClient mocks base method.
func (m *MockRemoteClientRegistry) GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetClient", ctx, cluster)
ret0, _ := ret[0].(client.Client)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetClient indicates an expected call of GetClient.
func (mr *MockRemoteClientRegistryMockRecorder) GetClient(ctx, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockRemoteClientRegistry)(nil).GetClient), ctx, cluster)
}
// MockIPValidator is a mock of IPValidator interface.
type MockIPValidator struct {
ctrl *gomock.Controller
recorder *MockIPValidatorMockRecorder
}
// MockIPValidatorMockRecorder is the mock recorder for MockIPValidator.
type MockIPValidatorMockRecorder struct {
mock *MockIPValidator
}
// NewMockIPValidator creates a new mock instance.
func NewMockIPValidator(ctrl *gomock.Controller) *MockIPValidator {
mock := &MockIPValidator{ctrl: ctrl}
mock.recorder = &MockIPValidatorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockIPValidator) EXPECT() *MockIPValidatorMockRecorder {
return m.recorder
}
// ValidateControlPlaneIP mocks base method.
func (m *MockIPValidator) ValidateControlPlaneIP(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateControlPlaneIP", ctx, log, spec)
ret0, _ := ret[0].(controller.Result)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ValidateControlPlaneIP indicates an expected call of ValidateControlPlaneIP.
func (mr *MockIPValidatorMockRecorder) ValidateControlPlaneIP(ctx, log, spec interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateControlPlaneIP", reflect.TypeOf((*MockIPValidator)(nil).ValidateControlPlaneIP), ctx, log, spec)
}
| 131 |
eks-anywhere | aws | Go | /*
Copyright 2022 Tinkerbell.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package rufiounreleased contains types that never became a formal release but were included in
EKSA releases. Given we have clusters deployed containing these types it is necessary to keep
them so we can perform conversions.
*/
// nolint
package rufiounreleased
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const BaseboardManagementResourceName = "baseboardmanagements.bmc.tinkerbell.org"
// PowerState represents the power state of the BaseboardManagement.
type PowerState string
// BootDevice represents the boot device of the BaseboardManagement.
type BootDevice string
// BaseboardManagementConditionType represents the condition of the BaseboardManagement.
type BaseboardManagementConditionType string
// ConditionStatus represents the status of a Condition.
type ConditionStatus string
const (
On PowerState = "on"
Off PowerState = "off"
)
const (
PXE BootDevice = "pxe"
Disk BootDevice = "disk"
BIOS BootDevice = "bios"
CDROM BootDevice = "cdrom"
Safe BootDevice = "safe"
)
const (
// Contactable defines that a connection can be made to the BaseboardManagement.
Contactable BaseboardManagementConditionType = "Contactable"
)
const (
ConditionTrue ConditionStatus = "True"
ConditionFalse ConditionStatus = "False"
)
// BaseboardManagementSpec defines the desired state of BaseboardManagement.
type BaseboardManagementSpec struct {
// Connection represents the BaseboardManagement connectivity information.
Connection Connection `json:"connection"`
}
type Connection struct {
// Host is the host IP address or hostname of the BaseboardManagement.
// +kubebuilder:validation:MinLength=1
Host string `json:"host"`
// Port is the port number for connecting with the BaseboardManagement.
// +kubebuilder:default:=623
Port int `json:"port"`
// AuthSecretRef is the SecretReference that contains authentication information of the BaseboardManagement.
// The Secret must contain username and password keys.
AuthSecretRef corev1.SecretReference `json:"authSecretRef"`
// InsecureTLS specifies whether insecure (unverified) TLS connections to the BaseboardManagement are allowed.
InsecureTLS bool `json:"insecureTLS"`
}
// BaseboardManagementStatus defines the observed state of BaseboardManagement.
type BaseboardManagementStatus struct {
// Power is the current power state of the BaseboardManagement.
// +kubebuilder:validation:Enum=on;off
// +optional
Power PowerState `json:"powerState,omitempty"`
// Conditions represents the latest available observations of an object's current state.
// +optional
Conditions []BaseboardManagementCondition `json:"conditions,omitempty"`
}
type BaseboardManagementCondition struct {
// Type of the BaseboardManagement condition.
Type BaseboardManagementConditionType `json:"type"`
// Status is the status of the BaseboardManagement condition.
// Can be True or False.
Status ConditionStatus `json:"status"`
// Last time the BaseboardManagement condition was updated.
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
// Message is a human readable message indicating details about the last transition.
// +optional
Message string `json:"message,omitempty"`
}
// +kubebuilder:object:generate=false
type BaseboardManagementSetConditionOption func(*BaseboardManagementCondition)
// SetCondition applies the cType condition to bm. If the condition already exists,
// it is updated.
func (bm *BaseboardManagement) SetCondition(cType BaseboardManagementConditionType, status ConditionStatus, opts ...BaseboardManagementSetConditionOption) {
var condition *BaseboardManagementCondition
// Check if there's an existing condition.
for i, c := range bm.Status.Conditions {
if c.Type == cType {
condition = &bm.Status.Conditions[i]
break
}
}
// We didn't find an existing condition so create a new one and append it.
if condition == nil {
bm.Status.Conditions = append(bm.Status.Conditions, BaseboardManagementCondition{
Type: cType,
})
condition = &bm.Status.Conditions[len(bm.Status.Conditions)-1]
}
if condition.Status != status {
condition.Status = status
condition.LastUpdateTime = metav1.Now()
}
for _, opt := range opts {
opt(condition)
}
}
// WithBaseboardManagementConditionMessage sets message m to the BaseboardManagementCondition.
func WithBaseboardManagementConditionMessage(m string) BaseboardManagementSetConditionOption {
return func(c *BaseboardManagementCondition) {
c.Message = m
}
}
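// The function below is an illustrative sketch and not part of the original API: it shows how
// SetCondition and WithBaseboardManagementConditionMessage might be combined to record a failed
// connectivity check. The function name and message are hypothetical.
func exampleRecordConnectionFailure(bm *BaseboardManagement) {
	// Mark the machine as not contactable and attach a human readable explanation.
	bm.SetCondition(
		Contactable,
		ConditionFalse,
		WithBaseboardManagementConditionMessage("bmc connection failure"),
	)
}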
// BaseboardManagementRef defines the reference information to a BaseboardManagement resource.
type BaseboardManagementRef struct {
// Name is unique within a namespace to reference a BaseboardManagement resource.
Name string `json:"name"`
// Namespace defines the space within which the BaseboardManagement name must be unique.
Namespace string `json:"namespace"`
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
//+kubebuilder:resource:path=baseboardmanagements,scope=Namespaced,categories=tinkerbell,singular=baseboardmanagement,shortName=bm
// BaseboardManagement is the Schema for the baseboardmanagements API.
type BaseboardManagement struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec BaseboardManagementSpec `json:"spec,omitempty"`
Status BaseboardManagementStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// BaseboardManagementList contains a list of BaseboardManagement.
type BaseboardManagementList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []BaseboardManagement `json:"items"`
}
| 189 |
eks-anywhere | aws | Go | package stack
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/registrymirror"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
args = "args"
createNamespace = "createNamespace"
deploy = "deploy"
env = "env"
hostPortEnabled = "hostPortEnabled"
image = "image"
namespace = "namespace"
overridesFileName = "tinkerbell-chart-overrides.yaml"
port = "port"
boots = "boots"
hegel = "hegel"
tinkController = "tinkController"
tinkServer = "tinkServer"
rufio = "rufio"
grpcPort = "42113"
kubevip = "kubevip"
envoy = "envoy"
)
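// Docker abstracts the container runtime operations used to run, inspect and remove the local
// Boots container.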
type Docker interface {
CheckContainerExistence(ctx context.Context, name string) (bool, error)
ForceRemove(ctx context.Context, name string) error
Run(ctx context.Context, image string, name string, cmd []string, flags ...string) error
}
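// Helm abstracts the registry login and chart install/upgrade operations used to deploy the
// Tinkerbell chart.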
type Helm interface {
RegistryLogin(ctx context.Context, endpoint, username, password string) error
InstallChartWithValuesFile(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, valuesFilePath string) error
UpgradeChartWithValuesFile(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, valuesFilePath string, opts ...executables.HelmOpt) error
}
// StackInstaller deploys a Tinkerbell stack.
//
//nolint:revive // Stutter and the interface shouldn't exist. Will clean up (chrisdoherty4)
type StackInstaller interface {
CleanupLocalBoots(ctx context.Context, forceCleanup bool) error
Install(ctx context.Context, bundle releasev1alpha1.TinkerbellBundle, tinkerbellIP, kubeconfig, hookOverride string, opts ...InstallOption) error
UninstallLocal(ctx context.Context) error
Upgrade(_ context.Context, _ releasev1alpha1.TinkerbellBundle, tinkerbellIP, kubeconfig string, hookOverride string) error
AddNoProxyIP(IP string)
GetNamespace() string
}
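// Installer is the default StackInstaller implementation.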
type Installer struct {
docker Docker
filewriter filewriter.FileWriter
helm Helm
podCidrRange string
registryMirror *registrymirror.RegistryMirror
proxyConfig *v1alpha1.ProxyConfiguration
namespace string
createNamespace bool
bootsOnDocker bool
hostPort bool
loadBalancer bool
envoy bool
}
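// InstallOption is a functional option that configures an Installer.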
type InstallOption func(s *Installer)
// WithNamespaceCreate is an InstallOption that lets you specify whether to create the namespace needed for the Tinkerbell stack.
func WithNamespaceCreate(create bool) InstallOption {
return func(s *Installer) {
s.createNamespace = create
}
}
// WithBootsOnDocker is an InstallOption to run Boots as a Docker container.
func WithBootsOnDocker() InstallOption {
return func(s *Installer) {
s.bootsOnDocker = true
}
}
// WithBootsOnKubernetes is an InstallOption to run Boots as a Kubernetes deployment.
func WithBootsOnKubernetes() InstallOption {
return func(s *Installer) {
s.bootsOnDocker = false
}
}
// WithHostPortEnabled is an InstallOption that allows you to enable/disable host port for Tinkerbell deployments.
func WithHostPortEnabled(enabled bool) InstallOption {
return func(s *Installer) {
s.hostPort = enabled
}
}
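// WithEnvoyEnabled is an InstallOption that allows you to enable/disable the envoy deployment.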
func WithEnvoyEnabled(enabled bool) InstallOption {
return func(s *Installer) {
s.envoy = enabled
}
}
// WithLoadBalancerEnabled is an InstallOption that allows you to set up a load balancer to expose hegel and tink-server.
func WithLoadBalancerEnabled(enabled bool) InstallOption {
return func(s *Installer) {
s.loadBalancer = enabled
}
}
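// The helper below is an illustrative sketch, not part of the original file: callers typically
// collect options like these and pass them to Install, which applies each one to the Installer
// before rendering the chart values. The specific toggles shown are hypothetical.
func exampleInstallOptions() []InstallOption {
	return []InstallOption{
		WithNamespaceCreate(true),
		WithBootsOnKubernetes(),
		WithHostPortEnabled(false),
		WithLoadBalancerEnabled(true),
	}
}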
// AddNoProxyIP is for workload cluster upgrade; we have to pass the
// controlPlaneEndpoint IP of the management cluster if a proxy is configured.
func (s *Installer) AddNoProxyIP(IP string) {
s.proxyConfig.NoProxy = append(s.proxyConfig.NoProxy, IP)
}
// NewInstaller returns a Tinkerbell StackInstaller which can be used to install or uninstall the Tinkerbell stack.
func NewInstaller(docker Docker, filewriter filewriter.FileWriter, helm Helm, namespace string, podCidrRange string, registryMirror *registrymirror.RegistryMirror, proxyConfig *v1alpha1.ProxyConfiguration) StackInstaller {
return &Installer{
docker: docker,
filewriter: filewriter,
helm: helm,
registryMirror: registryMirror,
proxyConfig: proxyConfig,
namespace: namespace,
podCidrRange: podCidrRange,
}
}
// Install installs the Tinkerbell stack on a target cluster using a Helm chart with the necessary values overrides.
func (s *Installer) Install(ctx context.Context, bundle releasev1alpha1.TinkerbellBundle, tinkerbellIP, kubeconfig, hookOverride string, opts ...InstallOption) error {
logger.V(6).Info("Installing Tinkerbell helm chart")
for _, option := range opts {
option(s)
}
bootEnv := []map[string]string{}
for k, v := range s.getBootsEnv(bundle.TinkerbellStack, tinkerbellIP) {
bootEnv = append(bootEnv, map[string]string{
"name": k,
"value": v,
})
}
osiePath, err := getURIDir(bundle.TinkerbellStack.Hook.Initramfs.Amd.URI)
if err != nil {
return fmt.Errorf("getting directory path from hook uri: %v", err)
}
if hookOverride != "" {
osiePath = hookOverride
}
valuesMap := map[string]interface{}{
namespace: s.namespace,
createNamespace: s.createNamespace,
tinkController: map[string]interface{}{
image: bundle.TinkerbellStack.Tink.TinkController.URI,
},
tinkServer: map[string]interface{}{
image: bundle.TinkerbellStack.Tink.TinkServer.URI,
args: []string{},
port: map[string]bool{
hostPortEnabled: s.hostPort,
},
},
hegel: map[string]interface{}{
image: bundle.TinkerbellStack.Hegel.URI,
port: map[string]bool{
hostPortEnabled: s.hostPort,
},
env: []map[string]string{
{
"name": "HEGEL_TRUSTED_PROXIES",
"value": s.podCidrRange,
},
},
},
boots: map[string]interface{}{
deploy: !s.bootsOnDocker,
image: bundle.TinkerbellStack.Boots.URI,
env: bootEnv,
args: []string{
"-dhcp-addr=0.0.0.0:67",
fmt.Sprintf("-osie-path-override=%s", osiePath),
},
},
rufio: map[string]interface{}{
image: bundle.TinkerbellStack.Rufio.URI,
},
kubevip: map[string]interface{}{
image: bundle.KubeVip.URI,
deploy: s.loadBalancer,
},
envoy: map[string]interface{}{
image: bundle.Envoy.URI,
deploy: s.envoy,
"externalIp": tinkerbellIP,
},
}
values, err := yaml.Marshal(valuesMap)
if err != nil {
return fmt.Errorf("marshalling values override for Tinkerbell Installer helm chart: %s", err)
}
valuesPath, err := s.filewriter.Write(overridesFileName, values)
if err != nil {
return fmt.Errorf("writing values override for Tinkerbell Installer helm chart: %s", err)
}
if err := s.authenticateHelmRegistry(ctx); err != nil {
return err
}
err = s.helm.InstallChartWithValuesFile(
ctx,
bundle.TinkerbellStack.TinkebellChart.Name,
fmt.Sprintf("oci://%s", s.localRegistryURL(bundle.TinkerbellStack.TinkebellChart.Image())),
bundle.TinkerbellStack.TinkebellChart.Tag(),
kubeconfig,
valuesPath,
)
if err != nil {
return fmt.Errorf("installing Tinkerbell helm chart: %v", err)
}
return s.installBootsOnDocker(ctx, bundle.TinkerbellStack, tinkerbellIP, kubeconfig, hookOverride)
}
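// installBootsOnDocker runs Boots as a local Docker container when the installer is configured with WithBootsOnDocker; otherwise it is a no-op.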
func (s *Installer) installBootsOnDocker(ctx context.Context, bundle releasev1alpha1.TinkerbellStackBundle, tinkServerIP, kubeconfig, hookOverride string) error {
if !s.bootsOnDocker {
return nil
}
kubeconfig, err := filepath.Abs(kubeconfig)
if err != nil {
return fmt.Errorf("getting absolute path for kubeconfig: %v", err)
}
flags := []string{
"-v", fmt.Sprintf("%s:/kubeconfig", kubeconfig),
"--network", "host",
"-e", fmt.Sprintf("PUBLIC_IP=%s", tinkServerIP),
"-e", fmt.Sprintf("PUBLIC_SYSLOG_IP=%s", tinkServerIP),
"-e", fmt.Sprintf("BOOTS_KUBE_NAMESPACE=%v", s.namespace),
}
for name, value := range s.getBootsEnv(bundle, tinkServerIP) {
flags = append(flags, "-e", fmt.Sprintf("%s=%s", name, value))
}
osiePath, err := getURIDir(bundle.Hook.Initramfs.Amd.URI)
if err != nil {
return fmt.Errorf("getting directory path from hook uri: %v", err)
}
if hookOverride != "" {
osiePath = hookOverride
}
cmd := []string{
"-kubeconfig", "/kubeconfig",
"-dhcp-addr", "0.0.0.0:67",
"-osie-path-override", osiePath,
}
if err := s.docker.Run(ctx, s.localRegistryURL(bundle.Boots.URI), boots, cmd, flags...); err != nil {
return fmt.Errorf("running boots with docker: %v", err)
}
return nil
}
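// getBootsEnv builds the environment variables for Boots, including the tink-server gRPC authority, registry mirror credentials, and proxy-aware extra kernel args.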
func (s *Installer) getBootsEnv(bundle releasev1alpha1.TinkerbellStackBundle, tinkServerIP string) map[string]string {
bootsEnv := map[string]string{
"DATA_MODEL_VERSION": "kubernetes",
"TINKERBELL_TLS": "false",
"TINKERBELL_GRPC_AUTHORITY": fmt.Sprintf("%s:%s", tinkServerIP, grpcPort),
}
extraKernelArgs := fmt.Sprintf("tink_worker_image=%s", s.localRegistryURL(bundle.Tink.TinkWorker.URI))
if s.registryMirror != nil {
localRegistry := s.registryMirror.BaseRegistry
extraKernelArgs = fmt.Sprintf("%s insecure_registries=%s", extraKernelArgs, localRegistry)
if s.registryMirror.Auth {
username, password, _ := config.ReadCredentials()
bootsEnv["REGISTRY_USERNAME"] = username
bootsEnv["REGISTRY_PASSWORD"] = password
}
}
if s.proxyConfig != nil {
noProxy := strings.Join(s.proxyConfig.NoProxy, ",")
extraKernelArgs = fmt.Sprintf("%s HTTP_PROXY=%s HTTPS_PROXY=%s NO_PROXY=%s", extraKernelArgs, s.proxyConfig.HttpProxy, s.proxyConfig.HttpsProxy, noProxy)
}
bootsEnv["BOOTS_EXTRA_KERNEL_ARGS"] = extraKernelArgs
return bootsEnv
}
// UninstallLocal currently removes the local Docker container running Boots.
func (s *Installer) UninstallLocal(ctx context.Context) error {
return s.uninstallBootsFromDocker(ctx)
}
func (s *Installer) uninstallBootsFromDocker(ctx context.Context) error {
logger.V(4).Info("Removing local boots container")
if err := s.docker.ForceRemove(ctx, boots); err != nil {
return fmt.Errorf("removing local boots container: %v", err)
}
return nil
}
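// getURIDir returns the directory portion of a URI, i.e. everything before the last "/".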
func getURIDir(uri string) (string, error) {
index := strings.LastIndex(uri, "/")
if index == -1 {
return "", fmt.Errorf("uri is invalid: %s", uri)
}
return uri[:index], nil
}
// CleanupLocalBoots determines whether Boots is already running locally
// and either cleans it up or errors out depending on the `remove` flag.
func (s *Installer) CleanupLocalBoots(ctx context.Context, remove bool) error {
exists, err := s.docker.CheckContainerExistence(ctx, boots)
// return error if the docker call failed
if err != nil {
return fmt.Errorf("checking boots container existence: %v", err)
}
// return nil if boots container doesn't exist
if !exists {
return nil
}
// if remove is set, try to delete boots
if remove {
return s.uninstallBootsFromDocker(ctx)
}
// finally, return an "already exists" error if boots exists and remove is not set
return errors.New("boots container already exists, delete the container manually or re-run the command with --force-cleanup")
}
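// localRegistryURL rewrites an image or chart URL to point at the configured registry mirror, if one is configured.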
func (s *Installer) localRegistryURL(originalURL string) string {
return s.registryMirror.ReplaceRegistry(originalURL)
}
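// authenticateHelmRegistry logs Helm in to the registry mirror when mirror authentication is required.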
func (s *Installer) authenticateHelmRegistry(ctx context.Context) error {
if s.registryMirror != nil && s.registryMirror.Auth {
username, password, err := config.ReadCredentials()
if err != nil {
return err
}
endpoint := s.registryMirror.BaseRegistry
if err := s.helm.RegistryLogin(ctx, endpoint, username, password); err != nil {
return err
}
}
return nil
}
// Upgrade upgrades the Tinkerbell stack using the images specified in the bundle.
func (s *Installer) Upgrade(ctx context.Context, bundle releasev1alpha1.TinkerbellBundle, tinkerbellIP, kubeconfig string, hookOverride string) error {
logger.V(6).Info("Upgrading Tinkerbell helm chart")
bootEnv := []map[string]string{}
for k, v := range s.getBootsEnv(bundle.TinkerbellStack, tinkerbellIP) {
bootEnv = append(bootEnv, map[string]string{
"name": k,
"value": v,
})
}
osiePath, err := getURIDir(bundle.TinkerbellStack.Hook.Initramfs.Amd.URI)
if err != nil {
return fmt.Errorf("getting directory path from hook uri: %v", err)
}
if hookOverride != "" {
osiePath = hookOverride
}
valuesMap := map[string]interface{}{
namespace: s.namespace,
createNamespace: false,
tinkController: map[string]interface{}{
image: bundle.TinkerbellStack.Tink.TinkController.URI,
},
tinkServer: map[string]interface{}{
image: bundle.TinkerbellStack.Tink.TinkServer.URI,
args: []string{},
},
hegel: map[string]interface{}{
image: bundle.TinkerbellStack.Hegel.URI,
},
boots: map[string]interface{}{
image: bundle.TinkerbellStack.Boots.URI,
env: bootEnv,
args: []string{
"-dhcp-addr=0.0.0.0:67",
fmt.Sprintf("-osie-path-override=%s", osiePath),
},
},
rufio: map[string]interface{}{
image: bundle.TinkerbellStack.Rufio.URI,
},
kubevip: map[string]interface{}{
image: bundle.KubeVip.URI,
},
envoy: map[string]interface{}{
image: bundle.Envoy.URI,
},
}
values, err := yaml.Marshal(valuesMap)
if err != nil {
return fmt.Errorf("marshalling values override for Tinkerbell Installer helm chart: %s", err)
}
valuesPath, err := s.filewriter.Write(overridesFileName, values)
if err != nil {
return fmt.Errorf("writing values override for Tinkerbell Installer helm chart: %s", err)
}
if err := s.authenticateHelmRegistry(ctx); err != nil {
return err
}
envMap := map[string]string{}
if s.proxyConfig != nil {
envMap["NO_PROXY"] = strings.Join(s.proxyConfig.NoProxy, ",")
}
return s.helm.UpgradeChartWithValuesFile(
ctx,
bundle.TinkerbellStack.TinkebellChart.Name,
fmt.Sprintf("oci://%s", s.localRegistryURL(bundle.TinkerbellStack.TinkebellChart.Image())),
bundle.TinkerbellStack.TinkebellChart.Tag(),
kubeconfig,
valuesPath,
executables.WithEnv(envMap),
)
}
// GetNamespace retrieves the namespace the installer is using for stack deployment.
func (s *Installer) GetNamespace() string {
return s.namespace
}
| 461 |
eks-anywhere | aws | Go | package stack_test
import (
"context"
"errors"
"fmt"
"log"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
filewritermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack/mocks"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/types"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
overridesFileName = "tinkerbell-chart-overrides.yaml"
boots = "boots"
testIP = "1.2.3.4"
helmChartPath = "public.ecr.aws/eks-anywhere/tinkerbell/tinkerbell-chart"
helmChartName = "tinkerbell-chart"
helmChartVersion = "0.1.0"
)
var helmChartURI = fmt.Sprintf("%s:%s", helmChartPath, helmChartVersion)
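// getTinkBundle returns a TinkerbellBundle populated with the image URIs used across these tests.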
func getTinkBundle() releasev1alpha1.TinkerbellBundle {
return releasev1alpha1.TinkerbellBundle{
TinkerbellStack: releasev1alpha1.TinkerbellStackBundle{
Tink: releasev1alpha1.TinkBundle{
TinkController: releasev1alpha1.Image{URI: "public.ecr.aws/eks-anywhere/tink-controller:latest"},
TinkServer: releasev1alpha1.Image{URI: "public.ecr.aws/eks-anywhere/tink-server:latest"},
TinkWorker: releasev1alpha1.Image{URI: "public.ecr.aws/eks-anywhere/tink-worker:latest"},
},
Boots: releasev1alpha1.Image{URI: "public.ecr.aws/eks-anywhere/boots:latest"},
Hegel: releasev1alpha1.Image{URI: "public.ecr.aws/eks-anywhere/hegel:latest"},
Hook: releasev1alpha1.HookBundle{
Initramfs: releasev1alpha1.HookArch{
Amd: releasev1alpha1.Archive{
URI: "https://anywhere-assests.eks.amazonaws.com/tinkerbell/hook/initramfs-x86-64",
},
},
},
Rufio: releasev1alpha1.Image{
URI: "public.ecr.aws/eks-anywhere/rufio:latest",
},
TinkebellChart: releasev1alpha1.Image{
Name: helmChartName,
URI: helmChartURI,
},
},
KubeVip: releasev1alpha1.Image{
URI: "public.ecr.aws/eks-anywhere/kube-vip:latest",
},
Envoy: releasev1alpha1.Image{
URI: "public.ecr.aws/eks-anywhere/envoy:latest",
},
}
}
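// assertYamlFilesEqual compares the unmarshalled contents of the golden and generated YAML files, updating the golden file first when the -update flag is set.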
func assertYamlFilesEqual(t *testing.T, wantYamlPath, gotYamlPath string) {
processUpdate(t, wantYamlPath, gotYamlPath)
if diff := cmp.Diff(unmarshalYamlToObject(t, wantYamlPath), unmarshalYamlToObject(t, gotYamlPath)); diff != "" {
t.Errorf("Expected file mismatch (-want +got):\n%s", diff)
}
}
func unmarshalYamlToObject(t *testing.T, filepath string) map[string]interface{} {
unmarshaledObject := make(map[string]interface{})
bytes := test.ReadFileAsBytes(t, filepath)
if err := yaml.Unmarshal(bytes, unmarshaledObject); err != nil {
t.Fatalf("failed to unmarshal %s: %v", filepath, err)
}
return unmarshaledObject
}
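// processUpdate overwrites the golden file with the generated file when the -update flag is set.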
func processUpdate(t *testing.T, goldenFilePath, generatedFilePath string) {
if *test.UpdateGoldenFiles {
if err := os.WriteFile(goldenFilePath, test.ReadFileAsBytes(t, generatedFilePath), 0o644); err != nil {
t.Fatalf("failed to update golden file %s: %v", goldenFilePath, err)
}
log.Printf("Golden file updated: %s", goldenFilePath)
}
}
// Note: This test contains generated files
// To automatically update the generated files, run the following
// go test -timeout 30s -run ^TestTinkerbellStackInstallWithDifferentOptions$ github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack -update -count=1 -v.
func TestTinkerbellStackInstallWithDifferentOptions(t *testing.T) {
stackTests := []struct {
name string
hookImageOverride string
expectedFile string
installOnDocker bool
registryMirror *registrymirror.RegistryMirror
proxyConfig *v1alpha1.ProxyConfiguration
opts []stack.InstallOption
}{
{
name: "with_namespace_create_true",
expectedFile: "testdata/expected_with_namespace_create_true.yaml",
opts: []stack.InstallOption{stack.WithNamespaceCreate(true)},
},
{
name: "with_namespace_create_false",
expectedFile: "testdata/expected_with_namespace_create_false.yaml",
opts: []stack.InstallOption{stack.WithNamespaceCreate(false)},
},
{
name: "with_boots_on_docker",
expectedFile: "testdata/expected_with_boots_on_docker.yaml",
installOnDocker: true,
opts: []stack.InstallOption{stack.WithBootsOnDocker()},
},
{
name: "with_boots_on_kubernetes",
expectedFile: "testdata/expected_with_boots_on_kubernetes.yaml",
opts: []stack.InstallOption{stack.WithBootsOnKubernetes()},
},
{
name: "with_host_port_enabled_true",
expectedFile: "testdata/expected_with_host_port_enabled_true.yaml",
opts: []stack.InstallOption{stack.WithHostPortEnabled(true)},
},
{
name: "with_host_port_enabled_false",
expectedFile: "testdata/expected_with_host_port_enabled_false.yaml",
opts: []stack.InstallOption{stack.WithHostPortEnabled(false)},
},
{
name: "with_envoy_enabled_true",
expectedFile: "testdata/expected_with_envoy_enabled_true.yaml",
opts: []stack.InstallOption{stack.WithEnvoyEnabled(true)},
},
{
name: "with_envoy_enabled_false",
expectedFile: "testdata/expected_with_envoy_enabled_false.yaml",
opts: []stack.InstallOption{stack.WithEnvoyEnabled(false)},
},
{
name: "with_load_balancer_enabled_true",
expectedFile: "testdata/expected_with_load_balancer_enabled_true.yaml",
opts: []stack.InstallOption{stack.WithLoadBalancerEnabled(true)},
},
{
name: "with_load_balancer_enabled_false",
expectedFile: "testdata/expected_with_load_balancer_enabled_false.yaml",
opts: []stack.InstallOption{stack.WithLoadBalancerEnabled(false)},
},
{
name: "with_kubernetes_options",
expectedFile: "testdata/expected_with_kubernetes_options.yaml",
opts: []stack.InstallOption{
stack.WithNamespaceCreate(true),
stack.WithBootsOnKubernetes(),
stack.WithEnvoyEnabled(true),
stack.WithLoadBalancerEnabled(true),
},
},
{
name: "with_docker_options",
expectedFile: "testdata/expected_with_docker_options.yaml",
installOnDocker: true,
opts: []stack.InstallOption{
stack.WithNamespaceCreate(false),
stack.WithBootsOnDocker(),
stack.WithHostPortEnabled(true),
stack.WithEnvoyEnabled(false),
stack.WithLoadBalancerEnabled(false),
},
},
{
name: "with_hook_override",
hookImageOverride: "https://my-local-web-server/hook",
expectedFile: "testdata/expected_with_hook_override.yaml",
opts: []stack.InstallOption{},
},
{
name: "with_registry_mirror",
expectedFile: "testdata/expected_with_registry_mirror.yaml",
registryMirror: ®istrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
"public.ecr.aws": "1.2.3.4:443/custom",
},
Auth: true,
},
opts: []stack.InstallOption{},
},
{
name: "with_proxy_config",
expectedFile: "testdata/expected_with_proxy_config.yaml",
proxyConfig: &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.4:3128",
HttpsProxy: "1.2.3.4:3128",
},
opts: []stack.InstallOption{},
},
}
for _, stackTest := range stackTests {
t.Run(stackTest.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
docker := mocks.NewMockDocker(mockCtrl)
helm := mocks.NewMockHelm(mockCtrl)
folder, writer := test.NewWriter(t)
cluster := &types.Cluster{Name: "test"}
ctx := context.Background()
s := stack.NewInstaller(docker, writer, helm, constants.EksaSystemNamespace, "192.168.0.0/16", stackTest.registryMirror, stackTest.proxyConfig)
generatedOverridesPath := filepath.Join(folder, "generated", overridesFileName)
if stackTest.registryMirror != nil && stackTest.registryMirror.Auth {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
helm.EXPECT().RegistryLogin(ctx, "1.2.3.4:443", "username", "password")
helm.EXPECT().InstallChartWithValuesFile(ctx, helmChartName, "oci://1.2.3.4:443/custom/eks-anywhere/tinkerbell/tinkerbell-chart", helmChartVersion, cluster.KubeconfigFile, generatedOverridesPath)
} else {
helm.EXPECT().InstallChartWithValuesFile(ctx, helmChartName, fmt.Sprintf("oci://%s", helmChartPath), helmChartVersion, cluster.KubeconfigFile, generatedOverridesPath)
}
if stackTest.installOnDocker {
docker.EXPECT().Run(ctx, "public.ecr.aws/eks-anywhere/boots:latest",
boots,
[]string{"-kubeconfig", "/kubeconfig", "-dhcp-addr", "0.0.0.0:67", "-osie-path-override", "https://anywhere-assests.eks.amazonaws.com/tinkerbell/hook"},
"-v", gomock.Any(),
"--network", "host",
"-e", gomock.Any(),
"-e", gomock.Any(),
"-e", gomock.Any(),
"-e", gomock.Any(),
"-e", gomock.Any(),
"-e", gomock.Any(),
"-e", gomock.Any(),
)
}
if err := s.Install(
ctx,
getTinkBundle(),
testIP,
cluster.KubeconfigFile,
stackTest.hookImageOverride,
stackTest.opts...,
); err != nil {
t.Fatalf("failed to install Tinkerbell stack: %v", err)
}
assertYamlFilesEqual(t, stackTest.expectedFile, generatedOverridesPath)
})
}
}
func TestTinkerbellStackUninstallLocalSuccess(t *testing.T) {
mockCtrl := gomock.NewController(t)
docker := mocks.NewMockDocker(mockCtrl)
helm := mocks.NewMockHelm(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
ctx := context.Background()
s := stack.NewInstaller(docker, writer, helm, constants.EksaSystemNamespace, "192.168.0.0/16", nil, nil)
docker.EXPECT().ForceRemove(ctx, boots)
err := s.UninstallLocal(ctx)
if err != nil {
t.Fatalf("failed to install Tinkerbell stack: %v", err)
}
}
func TestTinkerbellStackUninstallLocalFailure(t *testing.T) {
mockCtrl := gomock.NewController(t)
docker := mocks.NewMockDocker(mockCtrl)
helm := mocks.NewMockHelm(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
ctx := context.Background()
s := stack.NewInstaller(docker, writer, helm, constants.EksaSystemNamespace, "192.168.0.0/16", nil, nil)
dockerError := "docker error"
expectedError := fmt.Sprintf("removing local boots container: %s", dockerError)
docker.EXPECT().ForceRemove(ctx, boots).Return(errors.New(dockerError))
err := s.UninstallLocal(ctx)
assert.EqualError(t, err, expectedError, "Error should be: %v, got: %v", expectedError, err)
}
func TestTinkerbellStackCheckLocalBootsExistenceExistsForceCleanup(t *testing.T) {
mockCtrl := gomock.NewController(t)
docker := mocks.NewMockDocker(mockCtrl)
helm := mocks.NewMockHelm(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
ctx := context.Background()
s := stack.NewInstaller(docker, writer, helm, constants.EksaSystemNamespace, "192.168.0.0/16", nil, nil)
docker.EXPECT().CheckContainerExistence(ctx, "boots").Return(true, nil)
docker.EXPECT().ForceRemove(ctx, "boots")
err := s.CleanupLocalBoots(ctx, true)
assert.NoError(t, err)
}
func TestTinkerbellStackCheckLocalBootsExistenceDoesExist(t *testing.T) {
mockCtrl := gomock.NewController(t)
docker := mocks.NewMockDocker(mockCtrl)
helm := mocks.NewMockHelm(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
ctx := context.Background()
s := stack.NewInstaller(docker, writer, helm, constants.EksaSystemNamespace, "192.168.0.0/16", nil, nil)
expectedErrorMsg := "boots container already exists, delete the container manually or re-run the command with --force-cleanup"
docker.EXPECT().CheckContainerExistence(ctx, "boots").Return(true, nil)
err := s.CleanupLocalBoots(ctx, false)
assert.EqualError(t, err, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, err)
}
func TestTinkerbellStackCheckLocalBootsExistenceDoesNotExist(t *testing.T) {
mockCtrl := gomock.NewController(t)
docker := mocks.NewMockDocker(mockCtrl)
helm := mocks.NewMockHelm(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
ctx := context.Background()
s := stack.NewInstaller(docker, writer, helm, constants.EksaSystemNamespace, "192.168.0.0/16", nil, nil)
docker.EXPECT().CheckContainerExistence(ctx, "boots").Return(false, nil)
err := s.CleanupLocalBoots(ctx, true)
assert.NoError(t, err)
}
func TestUpgrade(t *testing.T) {
var (
mockCtrl = gomock.NewController(t)
docker = mocks.NewMockDocker(mockCtrl)
helm = mocks.NewMockHelm(mockCtrl)
folder, writer = test.NewWriter(t)
valuesFile = filepath.Join(folder, "generated", overridesFileName)
cluster = &types.Cluster{Name: "test"}
ctx = context.Background()
)
helm.EXPECT().UpgradeChartWithValuesFile(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
s := stack.NewInstaller(docker, writer, helm, constants.EksaSystemNamespace, "192.168.0.0/16", nil, nil)
err := s.Upgrade(ctx, getTinkBundle(), testIP, cluster.KubeconfigFile, "")
assert.NoError(t, err)
assertYamlFilesEqual(t, "testdata/expected_upgrade.yaml", valuesFile)
}
func TestUpgradeWithRegistryMirrorAuthError(t *testing.T) {
var (
mockCtrl = gomock.NewController(t)
docker = mocks.NewMockDocker(mockCtrl)
helm = mocks.NewMockHelm(mockCtrl)
_, writer = test.NewWriter(t)
cluster = &types.Cluster{Name: "test"}
ctx = context.Background()
)
registryMirror := ®istrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
"public.ecr.aws": "1.2.3.4:443/custom",
},
Auth: true,
}
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
expectedErrorMsg := "invalid registry credentials"
helm.EXPECT().RegistryLogin(ctx, "1.2.3.4:443", "username", "password").Return(errors.New(expectedErrorMsg))
s := stack.NewInstaller(docker, writer, helm, constants.EksaSystemNamespace, "192.168.0.0/16", registryMirror, nil)
err := s.Upgrade(ctx, getTinkBundle(), testIP, cluster.KubeconfigFile, "")
assert.EqualError(t, err, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, err)
}
func TestStackInstallerAddNoProxyIP(t *testing.T) {
var (
mockCtrl = gomock.NewController(t)
docker = mocks.NewMockDocker(mockCtrl)
helm = mocks.NewMockHelm(mockCtrl)
writer = filewritermocks.NewMockFileWriter(mockCtrl)
)
noProxy := []string{
"localhost", ".svc",
}
proxyConfiguration := &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.4",
HttpsProxy: "1.2.3.4",
NoProxy: noProxy,
}
s := stack.NewInstaller(docker, writer, helm, constants.EksaSystemNamespace, "192.168.0.0/16", nil, proxyConfiguration)
s.AddNoProxyIP("2.3.4.5")
noProxy = append(noProxy, "2.3.4.5")
if !reflect.DeepEqual(proxyConfiguration.NoProxy, noProxy) {
t.Fatalf("failed upgrading no proxy list of stack installer")
}
}
func TestUpgradeWithProxy(t *testing.T) {
var (
mockCtrl = gomock.NewController(t)
docker = mocks.NewMockDocker(mockCtrl)
helm = mocks.NewMockHelm(mockCtrl)
folder, writer = test.NewWriter(t)
valuesFile = filepath.Join(folder, "generated", overridesFileName)
cluster = &types.Cluster{Name: "test"}
ctx = context.Background()
)
helm.EXPECT().UpgradeChartWithValuesFile(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
proxyConfiguration := &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.4",
HttpsProxy: "1.2.3.4",
NoProxy: []string{
"localhost", ".svc",
},
}
s := stack.NewInstaller(docker, writer, helm, constants.EksaSystemNamespace, "192.168.0.0/16", nil, proxyConfiguration)
err := s.Upgrade(ctx, getTinkBundle(), testIP, cluster.KubeconfigFile, "https://my-local-web-server/hook")
assert.NoError(t, err)
assertYamlFilesEqual(t, "testdata/expected_upgrade.yaml", valuesFile)
}
| 456 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/providers/tinkerbell/stack/stack.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
executables "github.com/aws/eks-anywhere/pkg/executables"
stack "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack"
v1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
gomock "github.com/golang/mock/gomock"
)
// MockDocker is a mock of Docker interface.
type MockDocker struct {
ctrl *gomock.Controller
recorder *MockDockerMockRecorder
}
// MockDockerMockRecorder is the mock recorder for MockDocker.
type MockDockerMockRecorder struct {
mock *MockDocker
}
// NewMockDocker creates a new mock instance.
func NewMockDocker(ctrl *gomock.Controller) *MockDocker {
mock := &MockDocker{ctrl: ctrl}
mock.recorder = &MockDockerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDocker) EXPECT() *MockDockerMockRecorder {
return m.recorder
}
// CheckContainerExistence mocks base method.
func (m *MockDocker) CheckContainerExistence(ctx context.Context, name string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CheckContainerExistence", ctx, name)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CheckContainerExistence indicates an expected call of CheckContainerExistence.
func (mr *MockDockerMockRecorder) CheckContainerExistence(ctx, name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckContainerExistence", reflect.TypeOf((*MockDocker)(nil).CheckContainerExistence), ctx, name)
}
// ForceRemove mocks base method.
func (m *MockDocker) ForceRemove(ctx context.Context, name string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ForceRemove", ctx, name)
ret0, _ := ret[0].(error)
return ret0
}
// ForceRemove indicates an expected call of ForceRemove.
func (mr *MockDockerMockRecorder) ForceRemove(ctx, name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceRemove", reflect.TypeOf((*MockDocker)(nil).ForceRemove), ctx, name)
}
// Run mocks base method.
func (m *MockDocker) Run(ctx context.Context, image, name string, cmd []string, flags ...string) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, image, name, cmd}
for _, a := range flags {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Run", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Run indicates an expected call of Run.
func (mr *MockDockerMockRecorder) Run(ctx, image, name, cmd interface{}, flags ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, image, name, cmd}, flags...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockDocker)(nil).Run), varargs...)
}
// MockHelm is a mock of Helm interface.
type MockHelm struct {
ctrl *gomock.Controller
recorder *MockHelmMockRecorder
}
// MockHelmMockRecorder is the mock recorder for MockHelm.
type MockHelmMockRecorder struct {
mock *MockHelm
}
// NewMockHelm creates a new mock instance.
func NewMockHelm(ctrl *gomock.Controller) *MockHelm {
mock := &MockHelm{ctrl: ctrl}
mock.recorder = &MockHelmMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockHelm) EXPECT() *MockHelmMockRecorder {
return m.recorder
}
// InstallChartWithValuesFile mocks base method.
func (m *MockHelm) InstallChartWithValuesFile(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, valuesFilePath string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallChartWithValuesFile", ctx, chart, ociURI, version, kubeconfigFilePath, valuesFilePath)
ret0, _ := ret[0].(error)
return ret0
}
// InstallChartWithValuesFile indicates an expected call of InstallChartWithValuesFile.
func (mr *MockHelmMockRecorder) InstallChartWithValuesFile(ctx, chart, ociURI, version, kubeconfigFilePath, valuesFilePath interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallChartWithValuesFile", reflect.TypeOf((*MockHelm)(nil).InstallChartWithValuesFile), ctx, chart, ociURI, version, kubeconfigFilePath, valuesFilePath)
}
// RegistryLogin mocks base method.
func (m *MockHelm) RegistryLogin(ctx context.Context, endpoint, username, password string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RegistryLogin", ctx, endpoint, username, password)
ret0, _ := ret[0].(error)
return ret0
}
// RegistryLogin indicates an expected call of RegistryLogin.
func (mr *MockHelmMockRecorder) RegistryLogin(ctx, endpoint, username, password interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegistryLogin", reflect.TypeOf((*MockHelm)(nil).RegistryLogin), ctx, endpoint, username, password)
}
// UpgradeChartWithValuesFile mocks base method.
func (m *MockHelm) UpgradeChartWithValuesFile(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, valuesFilePath string, opts ...executables.HelmOpt) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, chart, ociURI, version, kubeconfigFilePath, valuesFilePath}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpgradeChartWithValuesFile", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// UpgradeChartWithValuesFile indicates an expected call of UpgradeChartWithValuesFile.
func (mr *MockHelmMockRecorder) UpgradeChartWithValuesFile(ctx, chart, ociURI, version, kubeconfigFilePath, valuesFilePath interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, chart, ociURI, version, kubeconfigFilePath, valuesFilePath}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeChartWithValuesFile", reflect.TypeOf((*MockHelm)(nil).UpgradeChartWithValuesFile), varargs...)
}
// MockStackInstaller is a mock of StackInstaller interface.
type MockStackInstaller struct {
ctrl *gomock.Controller
recorder *MockStackInstallerMockRecorder
}
// MockStackInstallerMockRecorder is the mock recorder for MockStackInstaller.
type MockStackInstallerMockRecorder struct {
mock *MockStackInstaller
}
// NewMockStackInstaller creates a new mock instance.
func NewMockStackInstaller(ctrl *gomock.Controller) *MockStackInstaller {
mock := &MockStackInstaller{ctrl: ctrl}
mock.recorder = &MockStackInstallerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockStackInstaller) EXPECT() *MockStackInstallerMockRecorder {
return m.recorder
}
// AddNoProxyIP mocks base method.
func (m *MockStackInstaller) AddNoProxyIP(IP string) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "AddNoProxyIP", IP)
}
// AddNoProxyIP indicates an expected call of AddNoProxyIP.
func (mr *MockStackInstallerMockRecorder) AddNoProxyIP(IP interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNoProxyIP", reflect.TypeOf((*MockStackInstaller)(nil).AddNoProxyIP), IP)
}
// CleanupLocalBoots mocks base method.
func (m *MockStackInstaller) CleanupLocalBoots(ctx context.Context, forceCleanup bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CleanupLocalBoots", ctx, forceCleanup)
ret0, _ := ret[0].(error)
return ret0
}
// CleanupLocalBoots indicates an expected call of CleanupLocalBoots.
func (mr *MockStackInstallerMockRecorder) CleanupLocalBoots(ctx, forceCleanup interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupLocalBoots", reflect.TypeOf((*MockStackInstaller)(nil).CleanupLocalBoots), ctx, forceCleanup)
}
// GetNamespace mocks base method.
func (m *MockStackInstaller) GetNamespace() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetNamespace")
ret0, _ := ret[0].(string)
return ret0
}
// GetNamespace indicates an expected call of GetNamespace.
func (mr *MockStackInstallerMockRecorder) GetNamespace() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespace", reflect.TypeOf((*MockStackInstaller)(nil).GetNamespace))
}
// Install mocks base method.
func (m *MockStackInstaller) Install(ctx context.Context, bundle v1alpha1.TinkerbellBundle, tinkerbellIP, kubeconfig, hookOverride string, opts ...stack.InstallOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, bundle, tinkerbellIP, kubeconfig, hookOverride}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Install", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Install indicates an expected call of Install.
func (mr *MockStackInstallerMockRecorder) Install(ctx, bundle, tinkerbellIP, kubeconfig, hookOverride interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, bundle, tinkerbellIP, kubeconfig, hookOverride}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Install", reflect.TypeOf((*MockStackInstaller)(nil).Install), varargs...)
}
// UninstallLocal mocks base method.
func (m *MockStackInstaller) UninstallLocal(ctx context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UninstallLocal", ctx)
ret0, _ := ret[0].(error)
return ret0
}
// UninstallLocal indicates an expected call of UninstallLocal.
func (mr *MockStackInstallerMockRecorder) UninstallLocal(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UninstallLocal", reflect.TypeOf((*MockStackInstaller)(nil).UninstallLocal), ctx)
}
// Upgrade mocks base method.
func (m *MockStackInstaller) Upgrade(arg0 context.Context, arg1 v1alpha1.TinkerbellBundle, tinkerbellIP, kubeconfig, hookOverride string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Upgrade", arg0, arg1, tinkerbellIP, kubeconfig, hookOverride)
ret0, _ := ret[0].(error)
return ret0
}
// Upgrade indicates an expected call of Upgrade.
func (mr *MockStackInstallerMockRecorder) Upgrade(arg0, arg1, tinkerbellIP, kubeconfig, hookOverride interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockStackInstaller)(nil).Upgrade), arg0, arg1, tinkerbellIP, kubeconfig, hookOverride)
}
| 267 |
eks-anywhere | aws | Go | package validator
import (
"github.com/pkg/errors"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/networkutils"
)
// IPValidator defines the struct for control plane IP validations.
type IPValidator struct {
netClient networkutils.NetClient
}
// IPValidatorOpt is the type for optional IPValidator configurations.
type IPValidatorOpt func(e *IPValidator)
// CustomNetClient passes in a custom net client to the IPValidator.
func CustomNetClient(netClient networkutils.NetClient) IPValidatorOpt {
return func(d *IPValidator) {
d.netClient = netClient
}
}
// NewIPValidator initializes a new IPValidator object.
func NewIPValidator(opts ...IPValidatorOpt) *IPValidator {
v := &IPValidator{
netClient: &networkutils.DefaultNetClient{},
}
for _, opt := range opts {
opt(v)
}
return v
}
// ValidateControlPlaneIPUniqueness checks that the control plane endpoint IP defined
// in the cluster spec is not already in use.
func (v *IPValidator) ValidateControlPlaneIPUniqueness(cluster *v1alpha1.Cluster) error {
ip := cluster.Spec.ControlPlaneConfiguration.Endpoint.Host
if networkutils.IsIPInUse(v.netClient, ip) {
return errors.Errorf("cluster controlPlaneConfiguration.Endpoint.Host <%s> is already in use, control plane IP must be unique", ip)
}
return nil
}
| 46 |
eks-anywhere | aws | Go | package validator_test
import (
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/networkutils/mocks"
"github.com/aws/eks-anywhere/pkg/providers/validator"
)
func TestValidateControlPlaneIPUniqueness(t *testing.T) {
g := NewWithT(t)
cluster := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Endpoint: &v1alpha1.Endpoint{
Host: "1.2.3.4",
},
},
},
}
ctrl := gomock.NewController(t)
client := mocks.NewMockNetClient(ctrl)
client.EXPECT().DialTimeout(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil, errors.New("no connection"))
ipValidator := validator.NewIPValidator(validator.CustomNetClient(client))
g.Expect(ipValidator.ValidateControlPlaneIPUniqueness(cluster)).To(Succeed())
}
| 34 |
eks-anywhere | aws | Go | package vsphere
import (
"context"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
vspherev1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1beta1"
addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
yamlcapi "github.com/aws/eks-anywhere/pkg/clusterapi/yaml"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
// BaseControlPlane represents a CAPI VSphere control plane.
type BaseControlPlane = clusterapi.ControlPlane[*vspherev1.VSphereCluster, *vspherev1.VSphereMachineTemplate]
// ControlPlane holds the VSphere specific objects for a CAPI VSphere control plane.
type ControlPlane struct {
BaseControlPlane
Secrets []*corev1.Secret
ConfigMaps []*corev1.ConfigMap
ClusterResourceSets []*addonsv1.ClusterResourceSet
}
// Objects returns the control plane objects associated with the VSphere cluster.
func (p ControlPlane) Objects() []kubernetes.Object {
o := p.BaseControlPlane.Objects()
o = getSecrets(o, p.Secrets)
o = getConfigMaps(o, p.ConfigMaps)
o = getClusterResourceSets(o, p.ClusterResourceSets)
return o
}
// ControlPlaneBuilder defines the builder for all objects in the CAPI VSphere control plane.
type ControlPlaneBuilder struct {
BaseBuilder *yamlcapi.ControlPlaneBuilder[*vspherev1.VSphereCluster, *vspherev1.VSphereMachineTemplate]
ControlPlane *ControlPlane
}
// BuildFromParsed implements the base yamlcapi.BuildFromParsed and processes any additional objects for the VSphere control plane.
func (b *ControlPlaneBuilder) BuildFromParsed(lookup yamlutil.ObjectLookup) error {
if err := b.BaseBuilder.BuildFromParsed(lookup); err != nil {
return err
}
b.ControlPlane.BaseControlPlane = *b.BaseBuilder.ControlPlane
processObjects(b.ControlPlane, lookup)
return nil
}
// ControlPlaneSpec builds a vsphere ControlPlane definition based on an eks-a cluster spec.
func ControlPlaneSpec(ctx context.Context, logger logr.Logger, client kubernetes.Client, spec *cluster.Spec) (*ControlPlane, error) {
templateBuilder := NewVsphereTemplateBuilder(time.Now)
controlPlaneYaml, err := templateBuilder.GenerateCAPISpecControlPlane(
spec,
func(values map[string]interface{}) {
values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(spec.Cluster)
values["etcdTemplateName"] = clusterapi.EtcdMachineTemplateName(spec.Cluster)
},
)
if err != nil {
return nil, errors.Wrap(err, "generating vsphere control plane yaml spec")
}
parser, builder, err := newControlPlaneParser(logger)
if err != nil {
return nil, err
}
err = parser.Parse(controlPlaneYaml, builder)
if err != nil {
return nil, errors.Wrap(err, "parsing vsphere control plane yaml")
}
cp := builder.ControlPlane
if err = cp.UpdateImmutableObjectNames(ctx, client, getMachineTemplate, machineTemplateEqual); err != nil {
return nil, errors.Wrap(err, "updating vsphere immutable object names")
}
return cp, nil
}
func newControlPlaneParser(logger logr.Logger) (*yamlutil.Parser, *ControlPlaneBuilder, error) {
parser, baseBuilder, err := yamlcapi.NewControlPlaneParserAndBuilder(
logger,
yamlutil.NewMapping(
"VSphereCluster",
func() *vspherev1.VSphereCluster {
return &vspherev1.VSphereCluster{}
},
),
yamlutil.NewMapping(
"VSphereMachineTemplate",
func() *vspherev1.VSphereMachineTemplate {
return &vspherev1.VSphereMachineTemplate{}
},
),
)
if err != nil {
return nil, nil, errors.Wrap(err, "building vsphere control plane parser")
}
err = parser.RegisterMappings(
yamlutil.NewMapping(constants.SecretKind, func() yamlutil.APIObject {
return &corev1.Secret{}
}),
yamlutil.NewMapping(constants.ConfigMapKind, func() yamlutil.APIObject {
return &corev1.ConfigMap{}
}),
yamlutil.NewMapping(constants.ClusterResourceSetKind, func() yamlutil.APIObject {
return &addonsv1.ClusterResourceSet{}
}),
)
if err != nil {
return nil, nil, errors.Wrap(err, "registering vsphere control plane mappings in parser")
}
builder := &ControlPlaneBuilder{
BaseBuilder: baseBuilder,
ControlPlane: &ControlPlane{},
}
return parser, builder, nil
}
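// processObjects collects the Secrets, ConfigMaps, and ClusterResourceSets from the parsed object lookup into the control plane.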
func processObjects(c *ControlPlane, lookup yamlutil.ObjectLookup) {
for _, obj := range lookup {
switch obj.GetObjectKind().GroupVersionKind().Kind {
case constants.SecretKind:
c.Secrets = append(c.Secrets, obj.(*corev1.Secret))
case constants.ConfigMapKind:
c.ConfigMaps = append(c.ConfigMaps, obj.(*corev1.ConfigMap))
case constants.ClusterResourceSetKind:
c.ClusterResourceSets = append(c.ClusterResourceSets, obj.(*addonsv1.ClusterResourceSet))
}
}
}
func getSecrets(o []kubernetes.Object, secrets []*corev1.Secret) []kubernetes.Object {
for _, s := range secrets {
o = append(o, s)
}
return o
}
func getConfigMaps(o []kubernetes.Object, configMaps []*corev1.ConfigMap) []kubernetes.Object {
for _, m := range configMaps {
o = append(o, m)
}
return o
}
func getClusterResourceSets(o []kubernetes.Object, clusterResourceSets []*addonsv1.ClusterResourceSet) []kubernetes.Object {
for _, s := range clusterResourceSets {
o = append(o, s)
}
return o
}
| 171 |
eks-anywhere | aws | Go | package vsphere_test
import (
"context"
"testing"
"time"
etcdadmbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api-provider-vsphere/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
addons "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/providers/vsphere"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/registrymirror/containerd"
)
const (
testClusterConfigMainFilename = "testdata/cluster_main.yaml"
)
type baseControlPlane = clusterapi.ControlPlane[*v1beta1.VSphereCluster, *v1beta1.VSphereMachineTemplate]
func TestControlPlaneObjects(t *testing.T) {
tests := []struct {
name string
controlPlane *vsphere.ControlPlane
want []kubernetes.Object
}{
{
name: "stacked etcd",
controlPlane: &vsphere.ControlPlane{
BaseControlPlane: baseControlPlane{
Cluster: capiCluster(),
ProviderCluster: vsphereCluster(),
KubeadmControlPlane: kubeadmControlPlane(),
ControlPlaneMachineTemplate: vsphereMachineTemplate("cp-mt"),
},
Secrets: []*corev1.Secret{secret()},
ConfigMaps: []*corev1.ConfigMap{configMap()},
ClusterResourceSets: []*addons.ClusterResourceSet{clusterResourceSet()},
},
want: []kubernetes.Object{
capiCluster(),
vsphereCluster(),
kubeadmControlPlane(),
vsphereMachineTemplate("cp-mt"),
secret(),
configMap(),
clusterResourceSet(),
},
},
{
name: "unstacked etcd",
controlPlane: &vsphere.ControlPlane{
BaseControlPlane: baseControlPlane{
Cluster: capiCluster(),
ProviderCluster: vsphereCluster(),
KubeadmControlPlane: kubeadmControlPlane(),
ControlPlaneMachineTemplate: vsphereMachineTemplate("cp-mt"),
EtcdCluster: etcdCluster(),
EtcdMachineTemplate: vsphereMachineTemplate("etcd-mt"),
},
Secrets: []*corev1.Secret{secret()},
ConfigMaps: []*corev1.ConfigMap{configMap()},
ClusterResourceSets: []*addons.ClusterResourceSet{clusterResourceSet()},
},
want: []kubernetes.Object{
capiCluster(),
vsphereCluster(),
kubeadmControlPlane(),
vsphereMachineTemplate("cp-mt"),
etcdCluster(),
vsphereMachineTemplate("etcd-mt"),
secret(),
configMap(),
clusterResourceSet(),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.controlPlane.Objects()).To(ConsistOf(tt.want))
})
}
}
func TestControlPlaneSpecNewCluster(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
client := test.NewFakeKubeClient()
spec := test.NewFullClusterSpec(t, testClusterConfigMainFilename)
cp, err := vsphere.ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp).NotTo(BeNil())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(kubeadmControlPlane()))
g.Expect(cp.EtcdCluster).To(Equal(etcdCluster()))
g.Expect(cp.ProviderCluster).To(Equal(vsphereCluster()))
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal("test-control-plane-1"))
g.Expect(cp.EtcdMachineTemplate.Name).To(Equal("test-etcd-1"))
}
func TestControlPlaneSpecNoKubeVersion(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
client := test.NewFakeKubeClient()
spec := test.NewFullClusterSpec(t, testClusterConfigMainFilename)
spec.Cluster.Spec.KubernetesVersion = ""
_, err := vsphere.ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).To(MatchError(ContainSubstring("generating vsphere control plane yaml spec")))
}
func TestControlPlaneSpecUpdateMachineTemplates(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, testClusterConfigMainFilename)
originalKubeadmControlPlane := kubeadmControlPlane()
originalEtcdCluster := etcdCluster()
originalEtcdCluster.Spec.InfrastructureTemplate.Name = "test-etcd-2"
originalCPMachineTemplate := vsphereMachineTemplate("test-control-plane-1")
originalEtcdMachineTemplate := vsphereMachineTemplate("test-etcd-2")
wantKCP := originalKubeadmControlPlane.DeepCopy()
wantEtcd := originalEtcdCluster.DeepCopy()
wantCPtemplate := originalCPMachineTemplate.DeepCopy()
wantEtcdTemplate := originalEtcdMachineTemplate.DeepCopy()
client := test.NewFakeKubeClient(
originalKubeadmControlPlane,
originalEtcdCluster,
originalCPMachineTemplate,
originalEtcdMachineTemplate,
)
cpTaints := []corev1.Taint{
{
Key: "foo",
Value: "bar",
Effect: "PreferNoSchedule",
},
}
spec.Cluster.Spec.ControlPlaneConfiguration.Taints = cpTaints
spec.VSphereMachineConfigs["test-etcd"].Spec.Datastore = "new-datastore"
wantKCP.Spec.MachineTemplate.InfrastructureRef.Name = "test-control-plane-2"
wantKCP.Spec.KubeadmConfigSpec.InitConfiguration.NodeRegistration.Taints = cpTaints
wantKCP.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.Taints = cpTaints
wantEtcd.Spec.InfrastructureTemplate.Name = "test-etcd-3"
wantCPtemplate.Name = "test-control-plane-2"
wantCPtemplate.Spec.Template.Spec.NumCPUs = 2
wantCPtemplate.Spec.Template.Spec.MemoryMiB = 8192
wantEtcdTemplate.Name = "test-etcd-3"
wantEtcdTemplate.Spec.Template.Spec.Datastore = "new-datastore"
cp, err := vsphere.ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp).NotTo(BeNil())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(wantKCP))
g.Expect(cp.EtcdCluster).To(Equal(wantEtcd))
g.Expect(cp.ProviderCluster).To(Equal(vsphereCluster()))
g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(wantCPtemplate))
g.Expect(cp.EtcdMachineTemplate).To(Equal(wantEtcdTemplate))
}
func TestControlPlaneSpecNoChangesMachineTemplates(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, testClusterConfigMainFilename)
originalKubeadmControlPlane := kubeadmControlPlane()
originalEtcdCluster := etcdCluster()
originalEtcdCluster.Spec.InfrastructureTemplate.Name = "test-etcd-1"
originalCPMachineTemplate := vsphereMachineTemplate("test-control-plane-1")
originalCPMachineTemplate.Spec.Template.Spec.NumCPUs = 2
originalCPMachineTemplate.Spec.Template.Spec.MemoryMiB = 8192
originalEtcdMachineTemplate := vsphereMachineTemplate("test-etcd-1")
wantKCP := originalKubeadmControlPlane.DeepCopy()
wantEtcd := originalEtcdCluster.DeepCopy()
wantCPtemplate := originalCPMachineTemplate.DeepCopy()
wantEtcdTemplate := originalEtcdMachineTemplate.DeepCopy()
// This mimics what would happen if the objects were returned by a real api server
// It helps make sure that the immutable object comparison is able to deal with these
// kind of changes.
originalCPMachineTemplate.CreationTimestamp = metav1.NewTime(time.Now())
originalEtcdMachineTemplate.CreationTimestamp = metav1.NewTime(time.Now())
// This is testing defaults. We don't set Snapshot in our machine templates,
// but it's possible that some default logic does. We need to take this into
// consideration when checking for equality.
originalCPMachineTemplate.Spec.Template.Spec.Snapshot = "current"
originalEtcdMachineTemplate.Spec.Template.Spec.Snapshot = "current"
client := test.NewFakeKubeClient(
originalKubeadmControlPlane,
originalEtcdCluster,
originalCPMachineTemplate,
originalEtcdMachineTemplate,
)
cp, err := vsphere.ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp).NotTo(BeNil())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(wantKCP))
g.Expect(cp.EtcdCluster).To(Equal(wantEtcd))
g.Expect(cp.ProviderCluster).To(Equal(vsphereCluster()))
g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(wantCPtemplate))
g.Expect(cp.EtcdMachineTemplate).To(Equal(wantEtcdTemplate))
}
func TestControlPlaneSpecErrorFromClient(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, testClusterConfigMainFilename)
client := test.NewFakeKubeClientAlwaysError()
_, err := vsphere.ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).To(MatchError(ContainSubstring("updating vsphere immutable object names")))
}
func TestControlPlaneSpecRegistryMirrorConfiguration(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
client := test.NewFakeKubeClient()
spec := test.NewFullClusterSpec(t, testClusterConfigMainFilename)
tests := []struct {
name string
mirrorConfig *anywherev1.RegistryMirrorConfiguration
files []bootstrapv1.File
}{
{
name: "insecure skip verify",
mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabled(),
files: test.RegistryMirrorConfigFilesInsecureSkipVerify(),
},
{
name: "insecure skip verify with ca cert",
mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabledAndCACert(),
files: test.RegistryMirrorConfigFilesInsecureSkipVerifyAndCACert(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
spec.Cluster.Spec.RegistryMirrorConfiguration = tt.mirrorConfig
cp, err := vsphere.ControlPlaneSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(cp).NotTo(BeNil())
g.Expect(cp.Cluster).To(Equal(capiCluster()))
g.Expect(cp.KubeadmControlPlane).To(Equal(kubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Spec.KubeadmConfigSpec.Files = append(kcp.Spec.KubeadmConfigSpec.Files, tt.files...)
kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands = append(test.RegistryMirrorSudoPreKubeadmCommands(), kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands...)
})))
g.Expect(cp.EtcdCluster.Spec.EtcdadmConfigSpec.RegistryMirror).To(Equal(etcdCluster(func(ec *etcdv1.EtcdadmCluster) {
ec.Spec.EtcdadmConfigSpec.RegistryMirror = &etcdadmbootstrapv1.RegistryMirrorConfiguration{
Endpoint: containerd.ToAPIEndpoint(registrymirror.FromClusterRegistryMirrorConfiguration(tt.mirrorConfig).CoreEKSAMirror()),
CACert: tt.mirrorConfig.CACertContent,
}
}).Spec.EtcdadmConfigSpec.RegistryMirror))
g.Expect(cp.ProviderCluster).To(Equal(vsphereCluster()))
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal("test-control-plane-1"))
g.Expect(cp.EtcdMachineTemplate.Name).To(Equal("test-etcd-1"))
})
}
}
func capiCluster() *clusterv1.Cluster {
return &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "eksa-system",
Labels: map[string]string{
"cluster.x-k8s.io/cluster-name": "test",
},
},
Spec: clusterv1.ClusterSpec{
ClusterNetwork: &clusterv1.ClusterNetwork{
APIServerPort: nil,
Services: &clusterv1.NetworkRanges{
CIDRBlocks: []string{"10.96.0.0/12"},
},
Pods: &clusterv1.NetworkRanges{
CIDRBlocks: []string{"192.168.0.0/16"},
},
},
ControlPlaneRef: &corev1.ObjectReference{
Kind: "KubeadmControlPlane",
Name: "test",
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
},
ManagedExternalEtcdRef: &corev1.ObjectReference{
Kind: "EtcdadmCluster",
Name: "test-etcd",
APIVersion: "etcdcluster.cluster.x-k8s.io/v1beta1",
Namespace: "eksa-system",
},
InfrastructureRef: &corev1.ObjectReference{
Kind: "VSphereCluster",
Name: "test",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
},
}
}
func vsphereCluster() *v1beta1.VSphereCluster {
return &v1beta1.VSphereCluster{
TypeMeta: metav1.TypeMeta{
Kind: "VSphereCluster",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: constants.EksaSystemNamespace,
},
Spec: v1beta1.VSphereClusterSpec{
Server: "vsphere_server",
Thumbprint: "ABCDEFG",
ControlPlaneEndpoint: v1beta1.APIEndpoint{
Host: "1.2.3.4",
Port: 6443,
},
IdentityRef: &v1beta1.VSphereIdentityReference{
Kind: "Secret",
Name: "test-vsphere-credentials",
},
},
}
}
func vsphereMachineTemplate(name string) *v1beta1.VSphereMachineTemplate {
return &v1beta1.VSphereMachineTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "VSphereMachineTemplate",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: constants.EksaSystemNamespace,
},
Spec: v1beta1.VSphereMachineTemplateSpec{
Template: v1beta1.VSphereMachineTemplateResource{
Spec: v1beta1.VSphereMachineSpec{
VirtualMachineCloneSpec: v1beta1.VirtualMachineCloneSpec{
Template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6",
CloneMode: "linkedClone",
Server: "vsphere_server",
Thumbprint: "ABCDEFG",
Datacenter: "SDDC-Datacenter",
Folder: "/SDDC-Datacenter/vm",
Datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore",
StoragePolicyName: "vSAN Default Storage Policy",
ResourcePool: "*/Resources",
Network: v1beta1.NetworkSpec{
Devices: []v1beta1.NetworkDeviceSpec{
{
NetworkName: "/SDDC-Datacenter/network/sddc-cgw-network-1",
DHCP4: true,
},
},
},
NumCPUs: 3,
MemoryMiB: 4096,
DiskGiB: 25,
},
},
},
},
}
}
func secret() *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: "eksa-system",
Name: "my-secret",
},
Data: map[string][]byte{
"username": []byte("test"),
"password": []byte("test"),
},
}
}
func configMap() *corev1.ConfigMap {
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: "eksa-system",
Name: "my-configmap",
},
Data: map[string]string{
"foo": "bar",
},
}
}
func clusterResourceSet() *addons.ClusterResourceSet {
return &addons.ClusterResourceSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "addons.cluster.x-k8s.io/v1beta1",
Kind: "ClusterResourceSet",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: "eksa-system",
Name: "my-crs",
},
}
}
func kubeadmControlPlane(opts ...func(*controlplanev1.KubeadmControlPlane)) *controlplanev1.KubeadmControlPlane {
var kcp *controlplanev1.KubeadmControlPlane
b := []byte(`apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
name: test
namespace: eksa-system
spec:
machineTemplate:
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereMachineTemplate
name: test-control-plane-1
kubeadmConfigSpec:
clusterConfiguration:
imageRepository: public.ecr.aws/eks-distro/kubernetes
etcd:
external:
endpoints: []
caFile: "/etc/kubernetes/pki/etcd/ca.crt"
certFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt"
keyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key"
dns:
imageRepository: public.ecr.aws/eks-distro/coredns
imageTag: v1.8.0-eks-1-19-4
apiServer:
extraArgs:
cloud-provider: external
audit-policy-file: /etc/kubernetes/audit-policy.yaml
audit-log-path: /var/log/kubernetes/api-audit.log
audit-log-maxage: "30"
audit-log-maxbackup: "10"
audit-log-maxsize: "512"
profiling: "false"
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
extraVolumes:
- hostPath: /etc/kubernetes/audit-policy.yaml
mountPath: /etc/kubernetes/audit-policy.yaml
name: audit-policy
pathType: File
readOnly: true
- hostPath: /var/log/kubernetes
mountPath: /var/log/kubernetes
name: audit-log-dir
pathType: DirectoryOrCreate
readOnly: false
controllerManager:
extraArgs:
cloud-provider: external
profiling: "false"
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
scheduler:
extraArgs:
profiling: "false"
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
files:
- content: |
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "6443"
- name: vip_cidr
value: "32"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "15"
- name: vip_renewdeadline
value: "10"
- name: vip_retryperiod
value: "2"
- name: address
value: 1.2.3.4
image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostNetwork: true
volumes:
- hostPath:
path: /etc/kubernetes/admin.conf
name: kubeconfig
status: {}
owner: root:root
path: /etc/kubernetes/manifests/kube-vip.yaml
- content: |
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
# Log aws-auth configmap changes
- level: RequestResponse
namespaces: ["kube-system"]
verbs: ["update", "patch", "delete"]
resources:
- group: "" # core
resources: ["configmaps"]
resourceNames: ["aws-auth"]
omitStages:
- "RequestReceived"
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services", "services/status"]
- level: None
users: ["kubelet"] # legacy kubelet identity
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
userGroups: ["system:nodes"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
users:
- system:kube-controller-manager
- system:kube-scheduler
- system:serviceaccount:kube-system:endpoint-controller
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["endpoints"]
- level: None
users: ["system:apiserver"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
# Don't log HPA fetching metrics.
- level: None
users:
- system:kube-controller-manager
verbs: ["get", "list"]
resources:
- group: "metrics.k8s.io"
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- /healthz*
- /version
- /swagger*
# Don't log events requests.
- level: None
resources:
- group: "" # core
resources: ["events"]
# node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
- level: Request
users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
- level: Request
userGroups: ["system:nodes"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
# deletecollection calls can be large, don't log responses for expected namespace deletions
- level: Request
users: ["system:serviceaccount:kube-system:namespace-controller"]
verbs: ["deletecollection"]
omitStages:
- "RequestReceived"
# Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
resources:
- group: "" # core
resources: ["secrets", "configmaps"]
- group: authentication.k8s.io
resources: ["tokenreviews"]
omitStages:
- "RequestReceived"
- level: Request
resources:
- group: ""
resources: ["serviceaccounts/token"]
# Get responses can be large; skip them.
- level: Request
verbs: ["get", "list", "watch"]
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "scheduling.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"
omitStages:
- "RequestReceived"
# Default level for known APIs
- level: RequestResponse
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "scheduling.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"
omitStages:
- "RequestReceived"
# Default level for all other requests.
- level: Metadata
omitStages:
- "RequestReceived"
owner: root:root
path: /etc/kubernetes/audit-policy.yaml
initConfiguration:
nodeRegistration:
criSocket: /var/run/containerd/containerd.sock
kubeletExtraArgs:
cloud-provider: external
read-only-port: "0"
anonymous-auth: "false"
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
name: '{{ ds.meta_data.hostname }}'
joinConfiguration:
nodeRegistration:
criSocket: /var/run/containerd/containerd.sock
kubeletExtraArgs:
cloud-provider: external
read-only-port: "0"
anonymous-auth: "false"
tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
name: '{{ ds.meta_data.hostname }}'
preKubeadmCommands:
- hostname "{{ ds.meta_data.hostname }}"
- echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
- echo "127.0.0.1 localhost" >>/etc/hosts
- echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts
- echo "{{ ds.meta_data.hostname }}" >/etc/hostname
useExperimentalRetryJoin: true
users:
- name: capv
sshAuthorizedKeys:
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=='
sudo: ALL=(ALL) NOPASSWD:ALL
format: cloud-config
replicas: 3
version: v1.19.8-eks-1-19-4`)
if err := yaml.UnmarshalStrict(b, &kcp); err != nil {
return nil
}
for _, opt := range opts {
opt(kcp)
}
return kcp
}
func etcdCluster(opts ...func(*etcdv1.EtcdadmCluster)) *etcdv1.EtcdadmCluster {
var etcdCluster *etcdv1.EtcdadmCluster
b := []byte(`kind: EtcdadmCluster
apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1
metadata:
name: test-etcd
namespace: eksa-system
spec:
replicas: 3
etcdadmConfigSpec:
etcdadmBuiltin: true
format: cloud-config
cloudInitConfig:
version: 3.4.14
installDir: "/usr/bin"
preEtcdadmCommands:
- hostname "{{ ds.meta_data.hostname }}"
- echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
- echo "127.0.0.1 localhost" >>/etc/hosts
- echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts
- echo "{{ ds.meta_data.hostname }}" >/etc/hostname
cipherSuites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
users:
- name: capv
sshAuthorizedKeys:
- 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=='
sudo: ALL=(ALL) NOPASSWD:ALL
infrastructureTemplate:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: VSphereMachineTemplate
name: test-etcd-1`)
if err := yaml.UnmarshalStrict(b, &etcdCluster); err != nil {
return nil
}
for _, opt := range opts {
opt(etcdCluster)
}
return etcdCluster
}
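// Illustrative sketch only, not part of the original test file: the fixture builders
// above accept functional options, so a test can tweak a single field of the expected
// object without rebuilding the whole fixture. The replica count below is an arbitrary
// example value.
func exampleKubeadmControlPlaneWithReplicas() *controlplanev1.KubeadmControlPlane {
	replicas := int32(5)
	return kubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
		kcp.Spec.Replicas = &replicas
	})
}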
| 796 |
eks-anywhere | aws | Go | package vsphere
import (
"context"
"fmt"
"path/filepath"
"strings"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/internal/templates"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const minDiskGib int = 20
type Defaulter struct {
govc ProviderGovcClient
}
func NewDefaulter(govc ProviderGovcClient) *Defaulter {
return &Defaulter{
govc: govc,
}
}
func (d *Defaulter) setDefaultsForMachineConfig(ctx context.Context, spec *Spec) error {
setDefaultsForEtcdMachineConfig(spec.etcdMachineConfig())
for _, m := range spec.machineConfigs() {
m.SetDefaults()
m.SetUserDefaults()
if err := d.setDefaultTemplateIfMissing(ctx, spec, m); err != nil {
return err
}
if err := d.setTemplateFullPath(ctx, spec.VSphereDatacenter, m); err != nil {
return err
}
if err := d.setCloneModeAndDiskSizeDefaults(ctx, m, spec.VSphereDatacenter.Spec.Datacenter); err != nil {
return err
}
}
return nil
}
func (d *Defaulter) SetDefaultsForDatacenterConfig(ctx context.Context, datacenterConfig *anywherev1.VSphereDatacenterConfig) error {
datacenterConfig.SetDefaults()
if datacenterConfig.Spec.Thumbprint != "" {
if err := d.govc.ConfigureCertThumbprint(ctx, datacenterConfig.Spec.Server, datacenterConfig.Spec.Thumbprint); err != nil {
return fmt.Errorf("failed configuring govc cert thumbprint: %v", err)
}
}
return nil
}
func setDefaultsForEtcdMachineConfig(machineConfig *anywherev1.VSphereMachineConfig) {
if machineConfig != nil && machineConfig.Spec.MemoryMiB < 8192 {
logger.Info("Warning: VSphereMachineConfig MemoryMiB for etcd machines should not be less than 8192. Defaulting to 8192")
machineConfig.Spec.MemoryMiB = 8192
}
}
func (d *Defaulter) setDefaultTemplateIfMissing(ctx context.Context, spec *Spec, machineConfig *anywherev1.VSphereMachineConfig) error {
if machineConfig.Spec.Template == "" {
logger.V(1).Info("Control plane VSphereMachineConfig template is not set. Using default template.")
if err := d.setupDefaultTemplate(ctx, spec, machineConfig); err != nil {
return err
}
}
return nil
}
func (d *Defaulter) setupDefaultTemplate(ctx context.Context, spec *Spec, machineConfig *anywherev1.VSphereMachineConfig) error {
osFamily := machineConfig.Spec.OSFamily
eksd := spec.VersionsBundle.EksD
var ova releasev1.Archive
switch osFamily {
case anywherev1.Bottlerocket:
ova = eksd.Ova.Bottlerocket
default:
return fmt.Errorf("can not import ova for osFamily: %s, please use %s as osFamily for auto-importing or provide a valid template", osFamily, anywherev1.Bottlerocket)
}
templateName := fmt.Sprintf("%s-%s-%s-%s-%s", osFamily, eksd.KubeVersion, eksd.Name, strings.Join(ova.Arch, "-"), ova.SHA256[:7])
machineConfig.Spec.Template = filepath.Join("/", spec.VSphereDatacenter.Spec.Datacenter, defaultTemplatesFolder, templateName)
tags := requiredTemplateTagsByCategory(spec.Spec, machineConfig)
// TODO: figure out if it's worth refactoring the factory to be able to reuse across machine configs.
templateFactory := templates.NewFactory(d.govc, spec.VSphereDatacenter.Spec.Datacenter, machineConfig.Spec.Datastore, spec.VSphereDatacenter.Spec.Network, machineConfig.Spec.ResourcePool, defaultTemplateLibrary)
// TODO: remove the factory's dependency on a machineConfig
if err := templateFactory.CreateIfMissing(ctx, spec.VSphereDatacenter.Spec.Datacenter, machineConfig, ova.URI, tags); err != nil {
return err
}
return nil
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
func (d *Defaulter) setCloneModeAndDiskSizeDefaults(ctx context.Context, machineConfig *anywherev1.VSphereMachineConfig, datacenter string) error {
templateDiskSize, err := d.govc.GetVMDiskSizeInGB(ctx, machineConfig.Spec.Template, datacenter)
if err != nil {
return fmt.Errorf("getting disk size for template %s: %v", machineConfig.Spec.Template, err)
}
minDiskSize := max(minDiskGib, templateDiskSize)
if machineConfig.Spec.DiskGiB < minDiskSize {
errStr := fmt.Sprintf("Warning: VSphereMachineConfig DiskGiB cannot be less than %v. Defaulting to %v.", minDiskSize, minDiskSize)
logger.Info(errStr)
machineConfig.Spec.DiskGiB = minDiskSize
}
templateHasSnapshot, err := d.govc.TemplateHasSnapshot(ctx, machineConfig.Spec.Template)
if err != nil {
return fmt.Errorf("getting template snapshot details: %v", err)
}
if machineConfig.Spec.CloneMode == anywherev1.FullClone {
return nil
}
if machineConfig.Spec.CloneMode == anywherev1.LinkedClone {
return validateMachineWithLinkedCloneMode(templateHasSnapshot, templateDiskSize, machineConfig)
}
if machineConfig.Spec.CloneMode == "" {
return validateMachineWithNoCloneMode(templateHasSnapshot, templateDiskSize, machineConfig)
}
return fmt.Errorf("cloneMode %s is not supported for VSphereMachineConfig %s. Supported clone modes: [%s, %s]", machineConfig.Spec.CloneMode, machineConfig.Name, anywherev1.LinkedClone, anywherev1.FullClone)
}
func validateMachineWithNoCloneMode(templateHasSnapshot bool, templateDiskSize int, machineConfig *anywherev1.VSphereMachineConfig) error {
if templateHasSnapshot && machineConfig.Spec.DiskGiB == templateDiskSize {
logger.V(3).Info("CloneMode not set, defaulting to linkedClone", "VSphereMachineConfig", machineConfig.Name)
machineConfig.Spec.CloneMode = anywherev1.LinkedClone
} else {
logger.V(3).Info("CloneMode not set, defaulting to fullClone", "VSphereMachineConfig", machineConfig.Name)
machineConfig.Spec.CloneMode = anywherev1.FullClone
}
return nil
}
func validateMachineWithLinkedCloneMode(templateHasSnapshot bool, templateDiskSize int, machineConfig *anywherev1.VSphereMachineConfig) error {
if !templateHasSnapshot {
return fmt.Errorf(
"cannot use 'linkedClone' for VSphereMachineConfig '%s' because its template (%s) has no snapshots; create snapshots or change the cloneMode to 'fullClone'",
machineConfig.Name,
machineConfig.Spec.Template,
)
}
if machineConfig.Spec.DiskGiB != templateDiskSize {
return fmt.Errorf(
"diskGiB cannot be customized for VSphereMachineConfig '%s' when using 'linkedClone'; change the cloneMode to 'fullClone' or the diskGiB to match the template's (%s) disk size of %d GiB",
machineConfig.Name,
machineConfig.Spec.Template,
templateDiskSize,
)
}
return nil
}
func (d *Defaulter) setTemplateFullPath(ctx context.Context,
datacenterConfig *anywherev1.VSphereDatacenterConfig,
machine *anywherev1.VSphereMachineConfig,
) error {
templateFullPath, err := d.govc.SearchTemplate(ctx, datacenterConfig.Spec.Datacenter, machine.Spec.Template)
if err != nil {
return fmt.Errorf("setting template full path: %v", err)
}
if len(templateFullPath) <= 0 {
return fmt.Errorf("template <%s> not found", machine.Spec.Template)
}
machine.Spec.Template = templateFullPath
return nil
}
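// Illustrative sketch only, not part of the original source: one way code in this
// package could chain the Defaulter calls above. The call order and the "govc"
// argument wiring are assumptions for the example, not the provider's real entry point.
func exampleApplyDefaults(ctx context.Context, govc ProviderGovcClient, spec *Spec) error {
	d := NewDefaulter(govc)
	// Datacenter-level defaults (e.g. cert thumbprint) first, then per-machine defaults.
	if err := d.SetDefaultsForDatacenterConfig(ctx, spec.VSphereDatacenter); err != nil {
		return err
	}
	return d.setDefaultsForMachineConfig(ctx, spec)
}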
| 192 |
eks-anywhere | aws | Go | package vsphere
import (
"fmt"
"os"
"strconv"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/config"
)
func SetupEnvVars(datacenterConfig *anywherev1.VSphereDatacenterConfig) error {
if vSphereUsername, ok := os.LookupEnv(config.EksavSphereUsernameKey); ok && len(vSphereUsername) > 0 {
if err := os.Setenv(vSphereUsernameKey, vSphereUsername); err != nil {
return fmt.Errorf("unable to set %s: %v", config.EksavSphereUsernameKey, err)
}
} else {
return fmt.Errorf("%s is not set or is empty", config.EksavSphereUsernameKey)
}
if vSpherePassword, ok := os.LookupEnv(config.EksavSpherePasswordKey); ok && len(vSpherePassword) > 0 {
if err := os.Setenv(vSpherePasswordKey, vSpherePassword); err != nil {
return fmt.Errorf("unable to set %s: %v", config.EksavSpherePasswordKey, err)
}
} else {
return fmt.Errorf("%s is not set or is empty", config.EksavSpherePasswordKey)
}
if err := os.Setenv(vSphereServerKey, datacenterConfig.Spec.Server); err != nil {
return fmt.Errorf("unable to set %s: %v", vSphereServerKey, err)
}
if err := os.Setenv(expClusterResourceSetKey, "true"); err != nil {
return fmt.Errorf("unable to set %s: %v", expClusterResourceSetKey, err)
}
// TODO: move this somewhere else since it's not vSphere specific
if _, ok := os.LookupEnv(eksaLicense); !ok {
if err := os.Setenv(eksaLicense, ""); err != nil {
return fmt.Errorf("unable to set %s: %v", eksaLicense, err)
}
}
if err := os.Setenv(govcInsecure, strconv.FormatBool(datacenterConfig.Spec.Insecure)); err != nil {
return fmt.Errorf("unable to set %s: %v", govcInsecure, err)
}
if err := os.Setenv(govcDatacenterKey, datacenterConfig.Spec.Datacenter); err != nil {
return fmt.Errorf("unable to set %s: %v", govcDatacenterKey, err)
}
return nil
}
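// Illustrative sketch only, not part of the original file: SetupEnvVars expects the
// EKSA vSphere credential variables to already be exported before it is called. The
// literal values below are placeholders, not real credentials.
func exampleSetupEnvVars(datacenterConfig *anywherev1.VSphereDatacenterConfig) error {
	if err := os.Setenv(config.EksavSphereUsernameKey, "example-user"); err != nil {
		return err
	}
	if err := os.Setenv(config.EksavSpherePasswordKey, "example-password"); err != nil {
		return err
	}
	return SetupEnvVars(datacenterConfig)
}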
| 52 |
eks-anywhere | aws | Go | package vsphere_test
import (
"testing"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/providers/vsphere"
)
func TestSetupEnvVarsErrorDatacenter(t *testing.T) {
config := &v1alpha1.VSphereDatacenterConfig{
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Server: "test",
Insecure: false,
Datacenter: string([]byte{0}),
},
}
if err := vsphere.SetupEnvVars(config); err == nil {
t.Fatal("SetupEnvVars() err = nil, want err not nil")
}
}
| 22 |
eks-anywhere | aws | Go | package vsphere
import (
"context"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/equality"
vspherev1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
)
func getMachineTemplate(ctx context.Context, client kubernetes.Client, name, namespace string) (*vspherev1.VSphereMachineTemplate, error) {
m := &vspherev1.VSphereMachineTemplate{}
if err := client.Get(ctx, name, namespace, m); err != nil {
return nil, errors.Wrap(err, "reading vSphereMachineTemplate")
}
return m, nil
}
func machineTemplateEqual(new, old *vspherev1.VSphereMachineTemplate) bool {
return equality.Semantic.DeepDerivative(new.Spec, old.Spec)
}
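// Illustrative sketch only, not part of the original file: fetching the currently
// applied machine template and checking whether a newly generated one differs. The
// function name and its reuse of newTemplate's own name/namespace are assumptions.
func exampleNeedsNewMachineTemplate(ctx context.Context, client kubernetes.Client, newTemplate *vspherev1.VSphereMachineTemplate) (bool, error) {
	current, err := getMachineTemplate(ctx, client, newTemplate.Name, newTemplate.Namespace)
	if err != nil {
		return false, err
	}
	// DeepDerivative comparison: a new template is only needed when the specs differ.
	return !machineTemplateEqual(newTemplate, current), nil
}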
| 25 |
eks-anywhere | aws | Go | package vsphere
import (
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
)
type Spec struct {
*cluster.Spec
}
// NewSpec constructs a new vSphere cluster Spec.
func NewSpec(clusterSpec *cluster.Spec) *Spec {
return &Spec{
Spec: clusterSpec,
}
}
func (s *Spec) controlPlaneMachineConfig() *anywherev1.VSphereMachineConfig {
return controlPlaneMachineConfig(s.Spec)
}
func (s *Spec) workerMachineConfig(c anywherev1.WorkerNodeGroupConfiguration) *anywherev1.VSphereMachineConfig {
return workerMachineConfig(s.Spec, c)
}
func (s *Spec) etcdMachineConfig() *anywherev1.VSphereMachineConfig {
return etcdMachineConfig(s.Spec)
}
func (s *Spec) machineConfigs() []*anywherev1.VSphereMachineConfig {
machineConfigs := make([]*anywherev1.VSphereMachineConfig, 0, len(s.VSphereMachineConfigs))
for _, m := range s.VSphereMachineConfigs {
machineConfigs = append(machineConfigs, m)
}
return machineConfigs
}
func etcdMachineConfig(s *cluster.Spec) *anywherev1.VSphereMachineConfig {
if s.Cluster.Spec.ExternalEtcdConfiguration == nil || s.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef == nil {
return nil
}
return s.VSphereMachineConfigs[s.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name]
}
func controlPlaneMachineConfig(s *cluster.Spec) *anywherev1.VSphereMachineConfig {
return s.VSphereMachineConfigs[s.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
}
func workerMachineConfig(s *cluster.Spec, workers anywherev1.WorkerNodeGroupConfiguration) *anywherev1.VSphereMachineConfig {
return s.VSphereMachineConfigs[workers.MachineGroupRef.Name]
}
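// Illustrative sketch only, not part of the original file: wrapping a cluster.Spec and
// collecting the per-role machine configs. The nil check mirrors how etcdMachineConfig
// legitimately returns nil when no external etcd is configured.
func exampleRoleMachineConfigs(clusterSpec *cluster.Spec) []*anywherev1.VSphereMachineConfig {
	s := NewSpec(clusterSpec)
	configs := []*anywherev1.VSphereMachineConfig{s.controlPlaneMachineConfig()}
	if etcd := s.etcdMachineConfig(); etcd != nil {
		configs = append(configs, etcd)
	}
	return configs
}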
| 54 |
eks-anywhere | aws | Go | package vsphere
import (
"fmt"
"strings"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
)
func requiredTemplateTags(clusterSpec *cluster.Spec, machineConfig *v1alpha1.VSphereMachineConfig) []string {
tagsByCategory := requiredTemplateTagsByCategory(clusterSpec, machineConfig)
tags := make([]string, 0, len(tagsByCategory))
for _, t := range tagsByCategory {
tags = append(tags, t...)
}
return tags
}
func requiredTemplateTagsByCategory(clusterSpec *cluster.Spec, machineConfig *v1alpha1.VSphereMachineConfig) map[string][]string {
osFamily := machineConfig.Spec.OSFamily
return map[string][]string{
"eksdRelease": {fmt.Sprintf("eksdRelease:%s", clusterSpec.VersionsBundle.EksD.Name)},
"os": {fmt.Sprintf("os:%s", strings.ToLower(string(osFamily)))},
}
}
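// Illustrative sketch only, not part of the original file: the flattened tag list that
// requiredTemplateTags produces for a Bottlerocket machine config. The eks-d release
// name shown in the comment is a made-up example value.
func examplePrintRequiredTags(clusterSpec *cluster.Spec, machineConfig *v1alpha1.VSphereMachineConfig) {
	// Prints something like: [eksdRelease:kubernetes-1-21-eks-8 os:bottlerocket]
	fmt.Println(requiredTemplateTags(clusterSpec, machineConfig))
}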
| 28 |
eks-anywhere | aws | Go | package vsphere
import (
"fmt"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/crypto"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/providers/common"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/registrymirror/containerd"
"github.com/aws/eks-anywhere/pkg/semver"
"github.com/aws/eks-anywhere/pkg/templater"
"github.com/aws/eks-anywhere/pkg/types"
)
func NewVsphereTemplateBuilder(
now types.NowFunc,
) *VsphereTemplateBuilder {
return &VsphereTemplateBuilder{
now: now,
}
}
type VsphereTemplateBuilder struct {
now types.NowFunc
}
func (vs *VsphereTemplateBuilder) GenerateCAPISpecControlPlane(
clusterSpec *cluster.Spec,
buildOptions ...providers.BuildMapOption,
) (content []byte, err error) {
var etcdMachineSpec anywherev1.VSphereMachineConfigSpec
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
etcdMachineSpec = etcdMachineConfig(clusterSpec).Spec
}
values, err := buildTemplateMapCP(
clusterSpec,
clusterSpec.VSphereDatacenter.Spec,
controlPlaneMachineConfig(clusterSpec).Spec,
etcdMachineSpec,
)
if err != nil {
return nil, err
}
for _, buildOption := range buildOptions {
buildOption(values)
}
bytes, err := templater.Execute(defaultCAPIConfigCP, values)
if err != nil {
return nil, err
}
return bytes, nil
}
func (vs *VsphereTemplateBuilder) isCgroupDriverSystemd(clusterSpec *cluster.Spec) (bool, error) {
bundle := clusterSpec.VersionsBundle
k8sVersion, err := semver.New(bundle.KubeDistro.Kubernetes.Tag)
if err != nil {
return false, fmt.Errorf("parsing kubernetes version %v: %v", bundle.KubeDistro.Kubernetes.Tag, err)
}
if k8sVersion.Major == 1 && k8sVersion.Minor == 21 {
return true, nil
}
return false, nil
}
// CAPIWorkersSpecWithInitialNames generates a yaml spec with the CAPI objects representing the worker
// nodes for a particular eks-a cluster. It uses default initial names (ending in '-1') for the vsphere
// machine templates and kubeadm config templates.
func (vs *VsphereTemplateBuilder) CAPIWorkersSpecWithInitialNames(spec *cluster.Spec) (content []byte, err error) {
machineTemplateNames, kubeadmConfigTemplateNames := clusterapi.InitialTemplateNamesForWorkers(spec)
return vs.GenerateCAPISpecWorkers(spec, machineTemplateNames, kubeadmConfigTemplateNames)
}
func (vs *VsphereTemplateBuilder) GenerateCAPISpecWorkers(
clusterSpec *cluster.Spec,
workloadTemplateNames,
kubeadmconfigTemplateNames map[string]string,
) (content []byte, err error) {
// pin cgroupDriver to systemd for k8s >= 1.21 when generating template in controller
// remove this check once the controller supports ordered upgrades,
// i.e. control plane and etcd upgraded before worker nodes.
cgroupDriverSystemd, err := vs.isCgroupDriverSystemd(clusterSpec)
if err != nil {
return nil, err
}
workerSpecs := make([][]byte, 0, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
values, err := buildTemplateMapMD(
clusterSpec,
clusterSpec.VSphereDatacenter.Spec,
workerMachineConfig(clusterSpec, workerNodeGroupConfiguration).Spec,
workerNodeGroupConfiguration,
)
if err != nil {
return nil, err
}
values["workloadTemplateName"] = workloadTemplateNames[workerNodeGroupConfiguration.Name]
values["workloadkubeadmconfigTemplateName"] = kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name]
values["cgroupDriverSystemd"] = cgroupDriverSystemd
bytes, err := templater.Execute(defaultClusterConfigMD, values)
if err != nil {
return nil, err
}
workerSpecs = append(workerSpecs, bytes)
}
return templater.AppendYamlResources(workerSpecs...), nil
}
func buildTemplateMapCP(
clusterSpec *cluster.Spec,
datacenterSpec anywherev1.VSphereDatacenterConfigSpec,
controlPlaneMachineSpec, etcdMachineSpec anywherev1.VSphereMachineConfigSpec,
) (map[string]interface{}, error) {
bundle := clusterSpec.VersionsBundle
format := "cloud-config"
etcdExtraArgs := clusterapi.SecureEtcdTlsCipherSuitesExtraArgs()
sharedExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs()
kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)).
Append(clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration))
apiServerExtraArgs := clusterapi.OIDCToExtraArgs(clusterSpec.OIDCConfig).
Append(clusterapi.AwsIamAuthExtraArgs(clusterSpec.AWSIamConfig)).
Append(clusterapi.PodIAMAuthExtraArgs(clusterSpec.Cluster.Spec.PodIAMConfig)).
Append(sharedExtraArgs)
controllerManagerExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
Append(clusterapi.NodeCIDRMaskExtraArgs(&clusterSpec.Cluster.Spec.ClusterNetwork))
vuc := config.NewVsphereUserConfig()
firstControlPlaneMachinesUser := controlPlaneMachineSpec.Users[0]
controlPlaneSSHKey, err := common.StripSshAuthorizedKeyComment(firstControlPlaneMachinesUser.SshAuthorizedKeys[0])
if err != nil {
return nil, fmt.Errorf("formatting ssh key for vsphere control plane template: %v", err)
}
values := map[string]interface{}{
"clusterName": clusterSpec.Cluster.Name,
"controlPlaneEndpointIp": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host,
"controlPlaneReplicas": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count,
"kubernetesRepository": bundle.KubeDistro.Kubernetes.Repository,
"kubernetesVersion": bundle.KubeDistro.Kubernetes.Tag,
"etcdRepository": bundle.KubeDistro.Etcd.Repository,
"etcdImageTag": bundle.KubeDistro.Etcd.Tag,
"corednsRepository": bundle.KubeDistro.CoreDNS.Repository,
"corednsVersion": bundle.KubeDistro.CoreDNS.Tag,
"nodeDriverRegistrarImage": bundle.KubeDistro.NodeDriverRegistrar.VersionedImage(),
"livenessProbeImage": bundle.KubeDistro.LivenessProbe.VersionedImage(),
"externalAttacherImage": bundle.KubeDistro.ExternalAttacher.VersionedImage(),
"externalProvisionerImage": bundle.KubeDistro.ExternalProvisioner.VersionedImage(),
"thumbprint": datacenterSpec.Thumbprint,
"vsphereDatacenter": datacenterSpec.Datacenter,
"controlPlaneVsphereDatastore": controlPlaneMachineSpec.Datastore,
"controlPlaneVsphereFolder": controlPlaneMachineSpec.Folder,
"managerImage": bundle.VSphere.Manager.VersionedImage(),
"kubeVipImage": bundle.VSphere.KubeVip.VersionedImage(),
"insecure": datacenterSpec.Insecure,
"vsphereNetwork": datacenterSpec.Network,
"controlPlaneVsphereResourcePool": controlPlaneMachineSpec.ResourcePool,
"vsphereServer": datacenterSpec.Server,
"controlPlaneVsphereStoragePolicyName": controlPlaneMachineSpec.StoragePolicyName,
"vsphereTemplate": controlPlaneMachineSpec.Template,
"controlPlaneVMsMemoryMiB": controlPlaneMachineSpec.MemoryMiB,
"controlPlaneVMsNumCPUs": controlPlaneMachineSpec.NumCPUs,
"controlPlaneDiskGiB": controlPlaneMachineSpec.DiskGiB,
"controlPlaneTagIDs": controlPlaneMachineSpec.TagIDs,
"etcdTagIDs": etcdMachineSpec.TagIDs,
"controlPlaneSshUsername": firstControlPlaneMachinesUser.Name,
"vsphereControlPlaneSshAuthorizedKey": controlPlaneSSHKey,
"podCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks,
"serviceCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks,
"etcdExtraArgs": etcdExtraArgs.ToPartialYaml(),
"etcdCipherSuites": crypto.SecureCipherSuitesString(),
"apiserverExtraArgs": apiServerExtraArgs.ToPartialYaml(),
"controllerManagerExtraArgs": controllerManagerExtraArgs.ToPartialYaml(),
"schedulerExtraArgs": sharedExtraArgs.ToPartialYaml(),
"kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(),
"format": format,
"externalEtcdVersion": bundle.KubeDistro.EtcdVersion,
"etcdImage": bundle.KubeDistro.EtcdImage.VersionedImage(),
"eksaSystemNamespace": constants.EksaSystemNamespace,
"cpiResourceSetName": cpiResourceSetName(clusterSpec),
"eksaVsphereUsername": vuc.EksaVsphereUsername,
"eksaVspherePassword": vuc.EksaVspherePassword,
"eksaCloudProviderUsername": vuc.EksaVsphereCPUsername,
"eksaCloudProviderPassword": vuc.EksaVsphereCPPassword,
"controlPlaneCloneMode": controlPlaneMachineSpec.CloneMode,
"etcdCloneMode": etcdMachineSpec.CloneMode,
}
auditPolicy, err := common.GetAuditPolicy(clusterSpec.Cluster.Spec.KubernetesVersion)
if err != nil {
return nil, err
}
values["auditPolicy"] = auditPolicy
if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil {
registryMirror := registrymirror.FromCluster(clusterSpec.Cluster)
values["registryMirrorMap"] = containerd.ToAPIEndpoints(registryMirror.NamespacedRegistryMap)
values["mirrorBase"] = registryMirror.BaseRegistry
values["insecureSkip"] = registryMirror.InsecureSkipVerify
values["publicMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror())
if len(registryMirror.CACertContent) > 0 {
values["registryCACert"] = registryMirror.CACertContent
}
if registryMirror.Auth {
values["registryAuth"] = registryMirror.Auth
username, password, err := config.ReadCredentials()
if err != nil {
return values, err
}
values["registryUsername"] = username
values["registryPassword"] = password
}
}
if clusterSpec.Cluster.Spec.ProxyConfiguration != nil {
values["proxyConfig"] = true
capacity := len(clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks) +
len(clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks) +
len(clusterSpec.Cluster.Spec.ProxyConfiguration.NoProxy) + 4
noProxyList := make([]string, 0, capacity)
noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks...)
noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks...)
noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ProxyConfiguration.NoProxy...)
// Add no-proxy defaults
noProxyList = append(noProxyList, clusterapi.NoProxyDefaults()...)
noProxyList = append(noProxyList,
datacenterSpec.Server,
clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host,
)
values["httpProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpProxy
values["httpsProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpsProxy
values["noProxy"] = noProxyList
}
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
firstEtcdMachinesUser := etcdMachineSpec.Users[0]
etcdSSHKey, err := common.StripSshAuthorizedKeyComment(firstEtcdMachinesUser.SshAuthorizedKeys[0])
if err != nil {
return nil, fmt.Errorf("formatting ssh key for vsphere etcd template: %v", err)
}
values["externalEtcd"] = true
values["externalEtcdReplicas"] = clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count
values["etcdVsphereDatastore"] = etcdMachineSpec.Datastore
values["etcdVsphereFolder"] = etcdMachineSpec.Folder
values["etcdDiskGiB"] = etcdMachineSpec.DiskGiB
values["etcdVMsMemoryMiB"] = etcdMachineSpec.MemoryMiB
values["etcdVMsNumCPUs"] = etcdMachineSpec.NumCPUs
values["etcdVsphereResourcePool"] = etcdMachineSpec.ResourcePool
values["etcdVsphereStoragePolicyName"] = etcdMachineSpec.StoragePolicyName
values["etcdSshUsername"] = firstEtcdMachinesUser.Name
values["vsphereEtcdSshAuthorizedKey"] = etcdSSHKey
if etcdMachineSpec.HostOSConfiguration != nil {
if etcdMachineSpec.HostOSConfiguration.NTPConfiguration != nil {
values["etcdNtpServers"] = etcdMachineSpec.HostOSConfiguration.NTPConfiguration.Servers
}
if etcdMachineSpec.HostOSConfiguration.CertBundles != nil {
values["etcdCertBundles"] = etcdMachineSpec.HostOSConfiguration.CertBundles
}
if etcdMachineSpec.HostOSConfiguration.BottlerocketConfiguration != nil {
if etcdMachineSpec.HostOSConfiguration.BottlerocketConfiguration.Kernel != nil &&
etcdMachineSpec.HostOSConfiguration.BottlerocketConfiguration.Kernel.SysctlSettings != nil {
values["etcdKernelSettings"] = etcdMachineSpec.HostOSConfiguration.BottlerocketConfiguration.Kernel.SysctlSettings
}
if etcdMachineSpec.HostOSConfiguration.BottlerocketConfiguration.Boot != nil &&
etcdMachineSpec.HostOSConfiguration.BottlerocketConfiguration.Boot.BootKernelParameters != nil {
values["etcdBootParameters"] = etcdMachineSpec.HostOSConfiguration.BottlerocketConfiguration.Boot.BootKernelParameters
}
}
}
}
if controlPlaneMachineSpec.OSFamily == anywherev1.Bottlerocket {
values["format"] = string(anywherev1.Bottlerocket)
values["pauseRepository"] = bundle.KubeDistro.Pause.Image()
values["pauseVersion"] = bundle.KubeDistro.Pause.Tag()
values["bottlerocketBootstrapRepository"] = bundle.BottleRocketHostContainers.KubeadmBootstrap.Image()
values["bottlerocketBootstrapVersion"] = bundle.BottleRocketHostContainers.KubeadmBootstrap.Tag()
}
if len(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints) > 0 {
values["controlPlaneTaints"] = clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints
}
if clusterSpec.AWSIamConfig != nil {
values["awsIamAuth"] = true
}
if controlPlaneMachineSpec.HostOSConfiguration != nil {
if controlPlaneMachineSpec.HostOSConfiguration.NTPConfiguration != nil {
values["cpNtpServers"] = controlPlaneMachineSpec.HostOSConfiguration.NTPConfiguration.Servers
}
if controlPlaneMachineSpec.HostOSConfiguration.CertBundles != nil {
values["certBundles"] = controlPlaneMachineSpec.HostOSConfiguration.CertBundles
}
brSettings, err := common.GetCAPIBottlerocketSettingsConfig(controlPlaneMachineSpec.HostOSConfiguration.BottlerocketConfiguration)
if err != nil {
return nil, err
}
values["bottlerocketSettings"] = brSettings
}
return values, nil
}
func buildTemplateMapMD(
clusterSpec *cluster.Spec,
datacenterSpec anywherev1.VSphereDatacenterConfigSpec,
workerNodeGroupMachineSpec anywherev1.VSphereMachineConfigSpec,
workerNodeGroupConfiguration anywherev1.WorkerNodeGroupConfiguration,
) (map[string]interface{}, error) {
bundle := clusterSpec.VersionsBundle
format := "cloud-config"
kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
Append(clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration)).
Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf))
firstUser := workerNodeGroupMachineSpec.Users[0]
sshKey, err := common.StripSshAuthorizedKeyComment(firstUser.SshAuthorizedKeys[0])
if err != nil {
return nil, fmt.Errorf("formatting ssh key for vsphere workers template: %v", err)
}
values := map[string]interface{}{
"clusterName": clusterSpec.Cluster.Name,
"kubernetesVersion": bundle.KubeDistro.Kubernetes.Tag,
"thumbprint": datacenterSpec.Thumbprint,
"vsphereDatacenter": datacenterSpec.Datacenter,
"workerVsphereDatastore": workerNodeGroupMachineSpec.Datastore,
"workerVsphereFolder": workerNodeGroupMachineSpec.Folder,
"vsphereNetwork": datacenterSpec.Network,
"workerVsphereResourcePool": workerNodeGroupMachineSpec.ResourcePool,
"vsphereServer": datacenterSpec.Server,
"workerVsphereStoragePolicyName": workerNodeGroupMachineSpec.StoragePolicyName,
"vsphereTemplate": workerNodeGroupMachineSpec.Template,
"workloadVMsMemoryMiB": workerNodeGroupMachineSpec.MemoryMiB,
"workloadVMsNumCPUs": workerNodeGroupMachineSpec.NumCPUs,
"workloadDiskGiB": workerNodeGroupMachineSpec.DiskGiB,
"workerTagIDs": workerNodeGroupMachineSpec.TagIDs,
"workerSshUsername": firstUser.Name,
"vsphereWorkerSshAuthorizedKey": sshKey,
"format": format,
"eksaSystemNamespace": constants.EksaSystemNamespace,
"kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(),
"workerReplicas": *workerNodeGroupConfiguration.Count,
"workerNodeGroupName": fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name),
"workerNodeGroupTaints": workerNodeGroupConfiguration.Taints,
"autoscalingConfig": workerNodeGroupConfiguration.AutoScalingConfiguration,
"workerCloneMode": workerNodeGroupMachineSpec.CloneMode,
}
if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil {
registryMirror := registrymirror.FromCluster(clusterSpec.Cluster)
values["registryMirrorMap"] = containerd.ToAPIEndpoints(registryMirror.NamespacedRegistryMap)
values["mirrorBase"] = registryMirror.BaseRegistry
values["insecureSkip"] = registryMirror.InsecureSkipVerify
values["publicMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror())
if len(registryMirror.CACertContent) > 0 {
values["registryCACert"] = registryMirror.CACertContent
}
if registryMirror.Auth {
values["registryAuth"] = registryMirror.Auth
username, password, err := config.ReadCredentials()
if err != nil {
return values, err
}
values["registryUsername"] = username
values["registryPassword"] = password
}
}
if clusterSpec.Cluster.Spec.ProxyConfiguration != nil {
values["proxyConfig"] = true
capacity := len(clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks) +
len(clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks) +
len(clusterSpec.Cluster.Spec.ProxyConfiguration.NoProxy) + 4
noProxyList := make([]string, 0, capacity)
noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks...)
noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks...)
noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ProxyConfiguration.NoProxy...)
// Add no-proxy defaults
noProxyList = append(noProxyList, clusterapi.NoProxyDefaults()...)
noProxyList = append(noProxyList,
datacenterSpec.Server,
clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host,
)
values["httpProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpProxy
values["httpsProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpsProxy
values["noProxy"] = noProxyList
}
if workerNodeGroupMachineSpec.OSFamily == anywherev1.Bottlerocket {
values["format"] = string(anywherev1.Bottlerocket)
values["pauseRepository"] = bundle.KubeDistro.Pause.Image()
values["pauseVersion"] = bundle.KubeDistro.Pause.Tag()
values["bottlerocketBootstrapRepository"] = bundle.BottleRocketHostContainers.KubeadmBootstrap.Image()
values["bottlerocketBootstrapVersion"] = bundle.BottleRocketHostContainers.KubeadmBootstrap.Tag()
}
if workerNodeGroupMachineSpec.HostOSConfiguration != nil {
if workerNodeGroupMachineSpec.HostOSConfiguration.NTPConfiguration != nil {
values["ntpServers"] = workerNodeGroupMachineSpec.HostOSConfiguration.NTPConfiguration.Servers
}
if workerNodeGroupMachineSpec.HostOSConfiguration.CertBundles != nil {
values["certBundles"] = workerNodeGroupMachineSpec.HostOSConfiguration.CertBundles
}
brSettings, err := common.GetCAPIBottlerocketSettingsConfig(workerNodeGroupMachineSpec.HostOSConfiguration.BottlerocketConfiguration)
if err != nil {
return nil, err
}
values["bottlerocketSettings"] = brSettings
}
return values, nil
}
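// Illustrative sketch only, not part of the original source: generating the control
// plane and worker CAPI manifests with the builder above. Passing the clock in as a
// types.NowFunc parameter is an assumption made to keep the example self-contained.
func exampleGenerateCAPISpecs(spec *cluster.Spec, now types.NowFunc) (cp, workers []byte, err error) {
	builder := NewVsphereTemplateBuilder(now)
	cp, err = builder.GenerateCAPISpecControlPlane(spec)
	if err != nil {
		return nil, nil, err
	}
	// Workers use the default initial template names (ending in "-1").
	workers, err = builder.CAPIWorkersSpecWithInitialNames(spec)
	if err != nil {
		return nil, nil, err
	}
	return cp, workers, nil
}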
| 444 |
eks-anywhere | aws | Go | package vsphere_test
import (
"testing"
"time"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/providers/vsphere"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneNoKubeVersion(t *testing.T) {
clusterSpec := vsphereClusterSpec()
g := NewWithT(t)
vs := vsphere.NewVsphereTemplateBuilder(time.Now)
_, err := vs.GenerateCAPISpecControlPlane(clusterSpec)
g.Expect(err).NotTo(MatchError(ContainSubstring("error building template map from CP")))
}
func TestVsphereTemplateBuilderGenerateCAPISpecWorkersInvalidSSHKey(t *testing.T) {
g := NewWithT(t)
spec := test.NewFullClusterSpec(t, "testdata/cluster_main.yaml")
firstMachineConfigName := spec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
machineConfig := spec.VSphereMachineConfigs[firstMachineConfigName]
machineConfig.Spec.Users[0].SshAuthorizedKeys[0] = invalidSSHKey()
builder := vsphere.NewVsphereTemplateBuilder(time.Now)
_, err := builder.GenerateCAPISpecWorkers(spec, nil, nil)
g.Expect(err).To(
MatchError(ContainSubstring("formatting ssh key for vsphere workers template: ssh")),
)
}
func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidControlPlaneSSHKey(t *testing.T) {
g := NewWithT(t)
spec := test.NewFullClusterSpec(t, "testdata/cluster_main.yaml")
controlPlaneMachineConfigName := spec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
machineConfig := spec.VSphereMachineConfigs[controlPlaneMachineConfigName]
machineConfig.Spec.Users[0].SshAuthorizedKeys[0] = invalidSSHKey()
builder := vsphere.NewVsphereTemplateBuilder(time.Now)
_, err := builder.GenerateCAPISpecControlPlane(spec, nil, nil)
g.Expect(err).To(
MatchError(ContainSubstring("formatting ssh key for vsphere control plane template: ssh")),
)
}
func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidEtcdSSHKey(t *testing.T) {
g := NewWithT(t)
spec := test.NewFullClusterSpec(t, "testdata/cluster_main.yaml")
etcdMachineConfigName := spec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
machineConfig := spec.VSphereMachineConfigs[etcdMachineConfigName]
machineConfig.Spec.Users[0].SshAuthorizedKeys[0] = invalidSSHKey()
builder := vsphere.NewVsphereTemplateBuilder(time.Now)
_, err := builder.GenerateCAPISpecControlPlane(spec, nil, nil)
g.Expect(err).To(
MatchError(ContainSubstring("formatting ssh key for vsphere etcd template: ssh")),
)
}
func invalidSSHKey() string {
return "ssh-rsa AAAA B3NzaC1K73CeQ== [email protected]"
}
func vsphereClusterSpec(opts ...test.ClusterSpecOpt) *cluster.Spec {
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = "test-cluster"
s.Cluster.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{
Count: 3,
Endpoint: &v1alpha1.Endpoint{
Host: "test-ip",
},
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}
s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{
Name: "md-0",
Count: ptr.Int(3),
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.VSphereMachineConfigKind,
Name: "eksa-unit-test",
},
}}
s.Cluster.Spec.ClusterNetwork = v1alpha1.ClusterNetwork{
CNIConfig: &v1alpha1.CNIConfig{Cilium: &v1alpha1.CiliumConfig{}},
Pods: v1alpha1.Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: v1alpha1.Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
}
s.Cluster.Spec.DatacenterRef = v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "eksa-unit-test",
}
s.VSphereDatacenter = &v1alpha1.VSphereDatacenterConfig{
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Datacenter: "test",
Network: "test",
Server: "test",
},
}
s.Cluster.Spec.DatacenterRef = v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "vsphere test",
}
s.VSphereMachineConfigs = map[string]*v1alpha1.VSphereMachineConfig{
"eksa-unit-test": {
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: v1alpha1.VSphereMachineConfigSpec{
Users: []v1alpha1.UserConfiguration{
{
Name: "capv",
SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="},
},
},
},
},
}
})
for _, op := range opts {
op(spec)
}
return spec
}
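// Illustrative sketch only, not part of the original test file: the functional options
// on vsphereClusterSpec let a test tweak the generated spec in place. The worker count
// below is an arbitrary example value.
func exampleVsphereClusterSpecWithFiveWorkers() *cluster.Spec {
	return vsphereClusterSpec(func(s *cluster.Spec) {
		s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(5)
	})
}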
| 136 |
eks-anywhere | aws | Go | package vsphere
import (
"context"
_ "embed"
"encoding/json"
"errors"
"fmt"
"net"
"path/filepath"
"gopkg.in/yaml.v2"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/govmomi"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/types"
)
const (
vsphereRootPath = "/"
)
type PrivAssociation struct {
objectType string
privsContent string
path string
}
type missingPriv struct {
Username string `yaml:"username"`
ObjectType string `yaml:"objectType"`
Path string `yaml:"path"`
Permissions []string `yaml:"permissions"`
}
type VSphereClientBuilder interface {
Build(ctx context.Context, host string, username string, password string, insecure bool, datacenter string) (govmomi.VSphereClient, error)
}
type Validator struct {
govc ProviderGovcClient
vSphereClientBuilder VSphereClientBuilder
}
// NewValidator initializes the client for VSphere provider validations.
func NewValidator(govc ProviderGovcClient, vscb VSphereClientBuilder) *Validator {
return &Validator{
govc: govc,
vSphereClientBuilder: vscb,
}
}
func (v *Validator) validateVCenterAccess(ctx context.Context, server string) error {
if err := v.govc.ValidateVCenterConnection(ctx, server); err != nil {
return fmt.Errorf("failed validating connection to vCenter: %v", err)
}
logger.MarkPass("Connected to server")
if err := v.govc.ValidateVCenterAuthentication(ctx); err != nil {
return fmt.Errorf("failed validating credentials for vCenter: %v", err)
}
logger.MarkPass("Authenticated to vSphere")
return nil
}
func (v *Validator) ValidateVCenterConfig(ctx context.Context, datacenterConfig *anywherev1.VSphereDatacenterConfig) error {
if err := v.validateVCenterAccess(ctx, datacenterConfig.Spec.Server); err != nil {
return err
}
if err := v.validateThumbprint(ctx, datacenterConfig); err != nil {
return err
}
if err := v.validateDatacenter(ctx, datacenterConfig.Spec.Datacenter); err != nil {
return err
}
logger.MarkPass("Datacenter validated")
if err := v.validateNetwork(ctx, datacenterConfig.Spec.Network); err != nil {
return err
}
logger.MarkPass("Network validated")
return nil
}
func (v *Validator) validateMachineConfigTagsExist(ctx context.Context, machineConfigs []*anywherev1.VSphereMachineConfig) error {
tags, err := v.govc.ListTags(ctx)
if err != nil {
return fmt.Errorf("failed to check if tags exists in vSphere: %v", err)
}
tagIDs := make([]string, 0, len(tags))
for _, t := range tags {
tagIDs = append(tagIDs, t.Id)
}
idLookup := types.SliceToLookup(tagIDs)
for _, machineConfig := range machineConfigs {
for _, tagID := range machineConfig.Spec.TagIDs {
if !idLookup.IsPresent(tagID) {
return fmt.Errorf("tag (%s) does not exist in vSphere. please provide a valid tag id in the urn format (example: urn:vmomi:InventoryServiceTag:8e0ce079-0677-48d6-8865-19ada4e6dabd:GLOBAL)", tagID)
}
}
}
logger.MarkPass("Machine config tags validated")
return nil
}
// ValidateClusterMachineConfigs validates all the attributes of etcd, control plane, and worker node VSphereMachineConfigs.
func (v *Validator) ValidateClusterMachineConfigs(ctx context.Context, vsphereClusterSpec *Spec) error {
var etcdMachineConfig *anywherev1.VSphereMachineConfig
controlPlaneMachineConfig := vsphereClusterSpec.controlPlaneMachineConfig()
if controlPlaneMachineConfig == nil {
return fmt.Errorf("cannot find VSphereMachineConfig %v for control plane", vsphereClusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name)
}
for _, workerNodeGroupConfiguration := range vsphereClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
workerNodeGroupMachineConfig := vsphereClusterSpec.workerMachineConfig(workerNodeGroupConfiguration)
if workerNodeGroupMachineConfig == nil {
return fmt.Errorf("cannot find VSphereMachineConfig %v for worker nodes", workerNodeGroupConfiguration.MachineGroupRef.Name)
}
}
if vsphereClusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
etcdMachineConfig = vsphereClusterSpec.etcdMachineConfig()
if etcdMachineConfig == nil {
return fmt.Errorf("cannot find VSphereMachineConfig %v for etcd machines", vsphereClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name)
}
if !v.sameOSFamily(vsphereClusterSpec.VSphereMachineConfigs) {
return errors.New("all VSphereMachineConfigs must have the same osFamily specified")
}
if !v.sameTemplate(vsphereClusterSpec.VSphereMachineConfigs) {
return errors.New("all VSphereMachineConfigs must have the same template specified")
}
if etcdMachineConfig.Spec.HostOSConfiguration != nil && etcdMachineConfig.Spec.HostOSConfiguration.BottlerocketConfiguration != nil && etcdMachineConfig.Spec.HostOSConfiguration.BottlerocketConfiguration.Kubernetes != nil {
logger.Info("Bottlerocket Kubernetes settings are not supported for etcd machines. Ignoring Kubernetes settings for etcd machines.", "etcdMachineConfig", etcdMachineConfig.Name)
}
}
// TODO: move this to api Cluster validations
if err := v.validateControlPlaneIp(vsphereClusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host); err != nil {
return err
}
for _, config := range vsphereClusterSpec.VSphereMachineConfigs {
var b bool // Temporary until we remove the need to pass a bool pointer
err := v.govc.ValidateVCenterSetupMachineConfig(ctx, vsphereClusterSpec.VSphereDatacenter, config, &b) // TODO: remove side effects from this implementation or directly move it to set defaults (pointer to bool is not needed)
if err != nil {
return fmt.Errorf("validating vCenter setup for VSphereMachineConfig %v: %v", config.Name, err)
}
}
if err := v.validateTemplate(ctx, vsphereClusterSpec, controlPlaneMachineConfig); err != nil {
logger.V(1).Info("Control plane template validation failed.")
return err
}
if err := v.validateMachineConfigTagsExist(ctx, vsphereClusterSpec.machineConfigs()); err != nil {
return err
}
logger.MarkPass("Control plane and Workload templates validated")
for _, mc := range vsphereClusterSpec.VSphereMachineConfigs {
if mc.OSFamily() == v1alpha1.Bottlerocket {
if err := v.validateBRHardDiskSize(ctx, vsphereClusterSpec, mc); err != nil {
return fmt.Errorf("failed validating BR Hard Disk size: %v", err)
}
}
}
return nil
}
func (v *Validator) validateControlPlaneIp(ip string) error {
// check if controlPlaneEndpointIp is valid
parsedIp := net.ParseIP(ip)
if parsedIp == nil {
return fmt.Errorf("cluster controlPlaneConfiguration.Endpoint.Host is invalid: %s", ip)
}
return nil
}
func (v *Validator) validateTemplate(ctx context.Context, spec *Spec, machineConfig *anywherev1.VSphereMachineConfig) error {
if err := v.validateTemplatePresence(ctx, spec.VSphereDatacenter.Spec.Datacenter, machineConfig); err != nil {
return err
}
if err := v.validateTemplateTags(ctx, spec, machineConfig); err != nil {
return err
}
return nil
}
func (v *Validator) validateTemplatePresence(ctx context.Context, datacenter string, machineConfig *anywherev1.VSphereMachineConfig) error {
templateFullPath, err := v.govc.SearchTemplate(ctx, datacenter, machineConfig.Spec.Template)
if err != nil {
return fmt.Errorf("validating template: %v", err)
}
if len(templateFullPath) <= 0 {
return fmt.Errorf("template <%s> not found. Has the template been imported?", machineConfig.Spec.Template)
}
return nil
}
func (v *Validator) validateTemplateTags(ctx context.Context, spec *Spec, machineConfig *anywherev1.VSphereMachineConfig) error {
tags, err := v.govc.GetTags(ctx, machineConfig.Spec.Template)
if err != nil {
return fmt.Errorf("validating template tags: %v", err)
}
tagsLookup := types.SliceToLookup(tags)
for _, t := range requiredTemplateTags(spec.Spec, machineConfig) {
if !tagsLookup.IsPresent(t) {
// TODO: maybe add help text about how to tag a template?
return fmt.Errorf("template %s is missing tag %s", machineConfig.Spec.Template, t)
}
}
return nil
}
func (v *Validator) validateBRHardDiskSize(ctx context.Context, spec *Spec, machineConfigSpec *anywherev1.VSphereMachineConfig) error {
dataCenter := spec.Config.VSphereDatacenter.Spec.Datacenter
template := machineConfigSpec.Spec.Template
hardDiskMap, err := v.govc.GetHardDiskSize(ctx, template, dataCenter)
if err != nil {
return fmt.Errorf("validating hard disk size: %v", err)
}
if len(hardDiskMap) == 0 {
return fmt.Errorf("no hard disks found for template: %v", template)
} else if len(hardDiskMap) > 1 {
if hardDiskMap[disk1] != 2097152 { // 2GB in KB to avoid roundoff errors
return fmt.Errorf("Incorrect disk size for disk1 - expected: 2097152 kB got: %v", hardDiskMap[disk1])
} else if hardDiskMap[disk2] != 20971520 { // 20GB in KB to avoid roundoff errors
return fmt.Errorf("Incorrect disk size for disk2 - expected: 20971520 kB got: %v", hardDiskMap[disk2])
}
} else if hardDiskMap[disk1] != 23068672 { // 22GB in KB to avoid roundoff errors
return fmt.Errorf("Incorrect disk size for disk1 - expected: 23068672 kB got: %v", hardDiskMap[disk1])
}
logger.V(5).Info("Bottlerocket Disk size validated: ", "diskMap", hardDiskMap)
return nil
}
func (v *Validator) validateThumbprint(ctx context.Context, datacenterConfig *anywherev1.VSphereDatacenterConfig) error {
// No need to validate thumbprint in insecure mode
if datacenterConfig.Spec.Insecure {
return nil
}
// If cert is not self signed, thumbprint is ignored
if !v.govc.IsCertSelfSigned(ctx) {
return nil
}
if datacenterConfig.Spec.Thumbprint == "" {
return fmt.Errorf("thumbprint is required for secure mode with self-signed certificates")
}
thumbprint, err := v.govc.GetCertThumbprint(ctx)
if err != nil {
return err
}
if thumbprint != datacenterConfig.Spec.Thumbprint {
return fmt.Errorf("thumbprint mismatch detected, expected: %s, actual: %s", datacenterConfig.Spec.Thumbprint, thumbprint)
}
return nil
}
func (v *Validator) validateDatacenter(ctx context.Context, datacenter string) error {
exists, err := v.govc.DatacenterExists(ctx, datacenter)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("datacenter %s not found", datacenter)
}
return nil
}
func (v *Validator) validateNetwork(ctx context.Context, network string) error {
exists, err := v.govc.NetworkExists(ctx, network)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("network %s not found", network)
}
return nil
}
func (v *Validator) collectSpecMachineConfigs(ctx context.Context, spec *Spec) ([]*anywherev1.VSphereMachineConfig, error) {
controlPlaneMachineConfig := spec.controlPlaneMachineConfig()
machineConfigs := []*anywherev1.VSphereMachineConfig{controlPlaneMachineConfig}
for _, workerNodeGroupConfiguration := range spec.Cluster.Spec.WorkerNodeGroupConfigurations {
workerNodeGroupMachineConfig := spec.workerMachineConfig(workerNodeGroupConfiguration)
machineConfigs = append(machineConfigs, workerNodeGroupMachineConfig)
}
if spec.Cluster.Spec.ExternalEtcdConfiguration != nil {
etcdMachineConfig := spec.etcdMachineConfig()
machineConfigs = append(machineConfigs, etcdMachineConfig)
}
return machineConfigs, nil
}
func (v *Validator) validateVsphereUserPrivs(ctx context.Context, vSphereClusterSpec *Spec) error {
var passed bool
var err error
vuc := config.NewVsphereUserConfig()
if passed, err = v.validateUserPrivs(ctx, vSphereClusterSpec, vuc); err != nil {
return err
}
markPrivsValidationPass(passed, vuc.EksaVsphereUsername)
if len(vuc.EksaVsphereCPUsername) > 0 && vuc.EksaVsphereCPUsername != vuc.EksaVsphereUsername {
if passed, err = v.validateCPUserPrivs(ctx, vSphereClusterSpec, vuc); err != nil {
return err
}
markPrivsValidationPass(passed, vuc.EksaVsphereCPUsername)
}
return nil
}
func markPrivsValidationPass(passed bool, username string) {
if passed {
s := fmt.Sprintf("%s user vSphere privileges validated", username)
logger.MarkPass(s)
}
}
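// validateUserPrivs checks that the EKS-A vSphere user has the required privileges on the root folder,
// the network, and on each machine config's datastore, resource pool, folder, template and template directory.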
func (v *Validator) validateUserPrivs(ctx context.Context, spec *Spec, vuc *config.VSphereUserConfig) (bool, error) {
machineConfigs, err := v.collectSpecMachineConfigs(ctx, spec)
if err != nil {
return false, err
}
requiredPrivAssociations := []PrivAssociation{
// validate global root priv settings are correct
{
objectType: govmomi.VSphereTypeFolder,
privsContent: config.VSphereGlobalPrivsFile,
path: vsphereRootPath,
},
{
objectType: govmomi.VSphereTypeNetwork,
privsContent: config.VSphereUserPrivsFile,
path: spec.VSphereDatacenter.Spec.Network,
},
}
seen := map[string]interface{}{}
for _, mc := range machineConfigs {
if _, ok := seen[mc.Spec.Datastore]; !ok {
requiredPrivAssociations = append(requiredPrivAssociations, PrivAssociation{
objectType: govmomi.VSphereTypeDatastore,
privsContent: config.VSphereUserPrivsFile,
path: mc.Spec.Datastore,
},
)
seen[mc.Spec.Datastore] = 1
}
if _, ok := seen[mc.Spec.ResourcePool]; !ok {
// Verify privs on the resource pool
requiredPrivAssociations = append(requiredPrivAssociations, PrivAssociation{
objectType: govmomi.VSphereTypeResourcePool,
privsContent: config.VSphereUserPrivsFile,
path: mc.Spec.ResourcePool,
})
seen[mc.Spec.ResourcePool] = 1
}
if _, ok := seen[mc.Spec.Folder]; !ok {
// validate Administrator role (all privs) on VM folder and Template folder
requiredPrivAssociations = append(requiredPrivAssociations, PrivAssociation{
objectType: govmomi.VSphereTypeFolder,
privsContent: config.VSphereAdminPrivsFile,
path: mc.Spec.Folder,
})
seen[mc.Spec.Folder] = 1
}
if _, ok := seen[mc.Spec.Template]; !ok {
// TODO: add more sophisticated validation around a scenario where someone has uploaded templates
// on their own and does not want to allow EKSA user write access to templates
// Verify privs on the template
requiredPrivAssociations = append(requiredPrivAssociations, PrivAssociation{
objectType: govmomi.VSphereTypeVirtualMachine,
privsContent: config.VSphereAdminPrivsFile,
path: mc.Spec.Template,
})
seen[mc.Spec.Template] = 1
}
if _, ok := seen[filepath.Dir(mc.Spec.Template)]; !ok {
// Verify privs on the template directory
requiredPrivAssociations = append(requiredPrivAssociations, PrivAssociation{
objectType: govmomi.VSphereTypeFolder,
privsContent: config.VSphereAdminPrivsFile,
path: filepath.Dir(mc.Spec.Template),
})
seen[filepath.Dir(mc.Spec.Template)] = 1
}
}
host := spec.VSphereDatacenter.Spec.Server
datacenter := spec.VSphereDatacenter.Spec.Datacenter
vsc, err := v.vSphereClientBuilder.Build(
ctx,
host,
vuc.EksaVsphereUsername,
vuc.EksaVspherePassword,
spec.VSphereDatacenter.Spec.Insecure,
datacenter,
)
if err != nil {
return false, err
}
return v.validatePrivs(ctx, requiredPrivAssociations, vsc)
}
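// validateCPUserPrivs verifies that the cloud provider (CP) user has read-only privileges at the vSphere root path.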
func (v *Validator) validateCPUserPrivs(ctx context.Context, spec *Spec, vuc *config.VSphereUserConfig) (bool, error) {
// CP role just needs read only
privObjs := []PrivAssociation{
{
objectType: govmomi.VSphereTypeFolder,
privsContent: config.VSphereReadOnlyPrivs,
path: vsphereRootPath,
},
}
host := spec.VSphereDatacenter.Spec.Server
datacenter := spec.VSphereDatacenter.Spec.Datacenter
vsc, err := v.vSphereClientBuilder.Build(
ctx,
host,
vuc.EksaVsphereCPUsername,
vuc.EksaVsphereCPPassword,
spec.VSphereDatacenter.Spec.Insecure,
datacenter,
)
if err != nil {
return false, err
}
return v.validatePrivs(ctx, privObjs, vsc)
}
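// validatePrivs returns true when the client's user holds every required privilege for all the given objects;
// missing privileges are logged as warnings rather than returned as errors.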
func (v *Validator) validatePrivs(ctx context.Context, privObjs []PrivAssociation, vsc govmomi.VSphereClient) (bool, error) {
var privs []string
var err error
missingPrivs := []missingPriv{}
passed := false
for _, obj := range privObjs {
path := obj.path
privsContent := obj.privsContent
t := obj.objectType
username := vsc.Username()
privs, err = v.getMissingPrivs(ctx, vsc, path, t, privsContent, username)
if err != nil {
return passed, err
} else if len(privs) > 0 {
mp := missingPriv{
Username: username,
ObjectType: t,
Path: path,
Permissions: privs,
}
missingPrivs = append(missingPrivs, mp)
content, err := yaml.Marshal(mp)
if err == nil {
s := fmt.Sprintf(" Warning: User %s missing %d vSphere permissions on %s, cluster creation may fail.\nRe-run create cluster with --verbosity=3 to see specific missing permissions.", username, len(privs), path)
logger.MarkWarning(s)
s = fmt.Sprintf("Missing Permissions:\n%s", string(content))
logger.V(3).Info(s)
} else {
s := fmt.Sprintf(" Warning: failed to list missing privs: %v", err)
logger.MarkWarning(s)
}
}
}
if len(missingPrivs) == 0 {
passed = true
}
return passed, nil
}
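// checkRequiredPrivs returns the privileges from requiredPrivs that are not present in hasPrivs.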
func checkRequiredPrivs(requiredPrivs []string, hasPrivs []string) []string {
hp := map[string]interface{}{}
for _, val := range hasPrivs {
hp[val] = 1
}
missingPrivs := []string{}
for _, p := range requiredPrivs {
if _, ok := hp[p]; !ok {
missingPrivs = append(missingPrivs, p)
}
}
return missingPrivs
}
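// getMissingPrivs compares the privileges the user holds on the given entity against the required
// privileges encoded in requiredPrivsContent and returns the ones that are missing.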
func (v *Validator) getMissingPrivs(ctx context.Context, vsc govmomi.VSphereClient, path string, objType string, requiredPrivsContent string, username string) ([]string, error) {
var requiredPrivs []string
err := json.Unmarshal([]byte(requiredPrivsContent), &requiredPrivs)
if err != nil {
return nil, err
}
hasPrivs, err := vsc.GetPrivsOnEntity(ctx, path, objType, username)
if err != nil {
return nil, err
}
missingPrivs := checkRequiredPrivs(requiredPrivs, hasPrivs)
return missingPrivs, nil
}
func (v *Validator) sameOSFamily(configs map[string]*anywherev1.VSphereMachineConfig) bool {
c := getRandomMachineConfig(configs)
osFamily := c.Spec.OSFamily
for _, machineConfig := range configs {
if machineConfig.Spec.OSFamily != osFamily {
return false
}
}
return true
}
func (v *Validator) sameTemplate(configs map[string]*anywherev1.VSphereMachineConfig) bool {
c := getRandomMachineConfig(configs)
template := c.Spec.Template
for _, machineConfig := range configs {
if machineConfig.Spec.Template != template {
return false
}
}
return true
}
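// getRandomMachineConfig returns an arbitrary machine config from the map; Go map iteration order
// makes the choice non-deterministic.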
func getRandomMachineConfig(configs map[string]*anywherev1.VSphereMachineConfig) *anywherev1.VSphereMachineConfig {
var machineConfig *anywherev1.VSphereMachineConfig
for _, c := range configs {
machineConfig = c
break
}
return machineConfig
}
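// validateUpgradeRolloutStrategy returns an error if an upgrade rollout strategy is configured on the
// control plane or any worker node group, since customization is not supported for the vSphere provider.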
func (v *Validator) validateUpgradeRolloutStrategy(clusterSpec *cluster.Spec) error {
if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil {
return fmt.Errorf("Upgrade rollout strategy customization is not supported for vSphere provider")
}
for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
if workerNodeGroupConfiguration.UpgradeRolloutStrategy != nil {
return fmt.Errorf("Upgrade rollout strategy customization is not supported for vSphere provider")
}
}
return nil
}
| 593 |
eks-anywhere | aws | Go | package vsphere
import (
"context"
"encoding/json"
"errors"
"fmt"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/govmomi"
"github.com/aws/eks-anywhere/pkg/govmomi/mocks"
govcmocks "github.com/aws/eks-anywhere/pkg/providers/vsphere/mocks"
)
func TestValidatorValidatePrivs(t *testing.T) {
v := Validator{}
ctrl := gomock.NewController(t)
vsc := mocks.NewMockVSphereClient(ctrl)
ctx := context.Background()
networkPath := "/Datacenter/network/path/foo"
objects := []PrivAssociation{
{
objectType: govmomi.VSphereTypeNetwork,
privsContent: config.VSphereUserPrivsFile,
path: networkPath,
},
}
var privs []string
err := json.Unmarshal([]byte(config.VSphereAdminPrivsFile), &privs)
if err != nil {
t.Fatalf("failed to validate privs: %v", err)
}
vsc.EXPECT().Username().Return("foobar")
vsc.EXPECT().GetPrivsOnEntity(ctx, networkPath, govmomi.VSphereTypeNetwork, "foobar").Return(privs, nil)
passed, err := v.validatePrivs(ctx, objects, vsc)
if passed != true || err != nil {
t.Fatalf("failed to validate privs passed=%v, err=%v", passed, err)
}
}
func TestValidatorValidatePrivsError(t *testing.T) {
v := Validator{}
ctrl := gomock.NewController(t)
vsc := mocks.NewMockVSphereClient(ctrl)
ctx := context.Background()
networkPath := "/Datacenter/network/path/foo"
objects := []PrivAssociation{
{
objectType: govmomi.VSphereTypeNetwork,
privsContent: config.VSphereUserPrivsFile,
path: networkPath,
},
}
var privs []string
err := json.Unmarshal([]byte(config.VSphereAdminPrivsFile), &privs)
if err != nil {
t.Fatalf("failed to validate privs: %v", err)
}
errMsg := "Could not retrieve privs"
g := NewWithT(t)
vsc.EXPECT().Username().Return("foobar")
vsc.EXPECT().GetPrivsOnEntity(ctx, networkPath, govmomi.VSphereTypeNetwork, "foobar").Return(nil, errors.New(errMsg))
_, err = v.validatePrivs(ctx, objects, vsc)
g.Expect(err).To(MatchError(ContainSubstring(errMsg)))
}
func TestValidatorValidatePrivsMissing(t *testing.T) {
v := Validator{}
ctrl := gomock.NewController(t)
vsc := mocks.NewMockVSphereClient(ctrl)
ctx := context.Background()
folderPath := "/Datacenter/vm/path/foo"
objects := []PrivAssociation{
{
objectType: govmomi.VSphereTypeFolder,
privsContent: config.VSphereAdminPrivsFile,
path: folderPath,
},
}
var privs []string
err := json.Unmarshal([]byte(config.VSphereUserPrivsFile), &privs)
if err != nil {
t.Fatalf("failed to validate privs: %v", err)
}
g := NewWithT(t)
vsc.EXPECT().Username().Return("foobar")
vsc.EXPECT().GetPrivsOnEntity(ctx, folderPath, govmomi.VSphereTypeFolder, "foobar").Return(privs, nil)
passed, err := v.validatePrivs(ctx, objects, vsc)
g.Expect(passed).To(BeEquivalentTo(false))
g.Expect(err).To(BeNil())
}
func TestValidatorValidatePrivsBadJson(t *testing.T) {
v := Validator{}
ctrl := gomock.NewController(t)
vsc := mocks.NewMockVSphereClient(ctrl)
vsc.EXPECT().Username().Return("foobar")
ctx := context.Background()
networkPath := "/Datacenter/network/path/foo"
g := NewWithT(t)
errMsg := "invalid character 'h' in literal true (expecting 'r')"
objects := []PrivAssociation{
{
objectType: govmomi.VSphereTypeNetwork,
privsContent: "this is bad json",
path: networkPath,
},
}
_, err := v.validatePrivs(ctx, objects, vsc)
g.Expect(err).To(MatchError(ContainSubstring(errMsg)))
}
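// clusterSpec returns a minimal vSphere cluster Spec with a single control plane machine config
// for use in the privilege validation tests.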
func clusterSpec() Spec {
cpMachineConfig := &v1alpha1.VSphereMachineConfig{
Spec: v1alpha1.VSphereMachineConfigSpec{
Datastore: "datastore",
ResourcePool: "pool",
Folder: "folder",
Template: "temp",
},
}
return Spec{
Spec: &cluster.Spec{
Config: &cluster.Config{
VSphereDatacenter: &v1alpha1.VSphereDatacenterConfig{
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Datacenter: "SDDC-Datacenter",
Server: "server",
},
},
VSphereMachineConfigs: map[string]*v1alpha1.VSphereMachineConfig{
"test-cp": cpMachineConfig,
},
Cluster: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
MachineGroupRef: &v1alpha1.Ref{
Name: "test-cp",
},
},
},
},
},
},
}
}
func TestValidatorValidateVsphereUserPrivsError(t *testing.T) {
ctrl := gomock.NewController(t)
govc := govcmocks.NewMockProviderGovcClient(ctrl)
vscb := govcmocks.NewMockVSphereClientBuilder(ctrl)
v := Validator{
govc: govc,
vSphereClientBuilder: vscb,
}
spec := clusterSpec()
ctx := context.Background()
vscb.EXPECT().Build(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), spec.VSphereDatacenter.Spec.Datacenter).Return(nil, fmt.Errorf("error"))
g := NewWithT(t)
err := v.validateVsphereUserPrivs(ctx, &spec)
g.Expect(err).To(MatchError(ContainSubstring("error")))
}
func TestValidatorValidateVsphereCPUserPrivsError(t *testing.T) {
ctx := context.Background()
ctrl := gomock.NewController(t)
govc := govcmocks.NewMockProviderGovcClient(ctrl)
vscb := govcmocks.NewMockVSphereClientBuilder(ctrl)
vsc := mocks.NewMockVSphereClient(ctrl)
wantEnv := map[string]string{
config.EksavSphereUsernameKey: "foo",
config.EksavSpherePasswordKey: "bar",
config.EksavSphereCPUsernameKey: "foo2",
config.EksavSphereCPPasswordKey: "bar2",
}
for k, v := range wantEnv {
t.Setenv(k, v)
}
v := Validator{
govc: govc,
vSphereClientBuilder: vscb,
}
var privs []string
err := json.Unmarshal([]byte(config.VSphereUserPrivsFile), &privs)
if err != nil {
t.Fatalf("failed to validate privs: %v", err)
}
spec := clusterSpec()
vsc.EXPECT().Username().Return("foobar").AnyTimes()
vsc.EXPECT().GetPrivsOnEntity(ctx, gomock.Any(), gomock.Any(), "foobar").Return(privs, nil).AnyTimes()
vscb.EXPECT().Build(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), spec.VSphereDatacenter.Spec.Datacenter).Return(vsc, nil)
vscb.EXPECT().Build(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), spec.VSphereDatacenter.Spec.Datacenter).Return(nil, fmt.Errorf("error"))
g := NewWithT(t)
err = v.validateVsphereUserPrivs(ctx, &spec)
g.Expect(err).To(MatchError(ContainSubstring("error")))
}
func TestValidatorValidateMachineConfigTagsExistErrorListingTag(t *testing.T) {
ctrl := gomock.NewController(t)
govc := govcmocks.NewMockProviderGovcClient(ctrl)
ctx := context.Background()
g := NewWithT(t)
v := Validator{
govc: govc,
}
machineConfigs := []*v1alpha1.VSphereMachineConfig{
{
Spec: v1alpha1.VSphereMachineConfigSpec{
TagIDs: []string{"tag-1", "tag-2"},
},
},
}
govc.EXPECT().ListTags(ctx).Return(nil, errors.New("error listing tags"))
err := v.validateMachineConfigTagsExist(ctx, machineConfigs)
g.Expect(err).To(Not(BeNil()))
}
func TestValidatorValidateMachineConfigTagsExistSuccess(t *testing.T) {
ctrl := gomock.NewController(t)
govc := govcmocks.NewMockProviderGovcClient(ctrl)
ctx := context.Background()
g := NewWithT(t)
v := Validator{
govc: govc,
}
machineConfigs := []*v1alpha1.VSphereMachineConfig{
{
Spec: v1alpha1.VSphereMachineConfigSpec{
TagIDs: []string{"tag-1", "tag-2"},
},
},
}
tagIDs := []executables.Tag{
{
Id: "tag-1",
},
{
Id: "tag-2",
},
{
Id: "tag-3",
},
}
govc.EXPECT().ListTags(ctx).Return(tagIDs, nil)
err := v.validateMachineConfigTagsExist(ctx, machineConfigs)
g.Expect(err).To(BeNil())
}
func TestValidatorValidateMachineConfigTagsExistTagDoesNotExist(t *testing.T) {
ctrl := gomock.NewController(t)
govc := govcmocks.NewMockProviderGovcClient(ctrl)
ctx := context.Background()
g := NewWithT(t)
v := Validator{
govc: govc,
}
machineConfigs := []*v1alpha1.VSphereMachineConfig{
{
Spec: v1alpha1.VSphereMachineConfigSpec{
TagIDs: []string{"tag-1", "tag-2"},
},
},
}
tagIDs := []executables.Tag{
{
Id: "tag-1",
},
{
Id: "tag-3",
},
}
govc.EXPECT().ListTags(ctx).Return(tagIDs, nil)
err := v.validateMachineConfigTagsExist(ctx, machineConfigs)
g.Expect(err).To(Not(BeNil()))
}
func TestValidateBRHardDiskSize(t *testing.T) {
ctrl := gomock.NewController(t)
govc := govcmocks.NewMockProviderGovcClient(ctrl)
ctx := context.Background()
v := Validator{
govc: govc,
}
machineConfig := v1alpha1.VSphereMachineConfig{
Spec: v1alpha1.VSphereMachineConfigSpec{
Template: "bottlerocket-kube-v1-21",
},
}
spec := Spec{
Spec: &cluster.Spec{
Config: &cluster.Config{
VSphereDatacenter: &v1alpha1.VSphereDatacenterConfig{
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Datacenter: "SDDC-Datacenter",
},
},
},
},
}
govcErr := errors.New("error GetHardDiskSize()")
tests := []struct {
testName string
returnDiskMap map[string]float64
ifErr error
wantErr error
}{
{
testName: "getHardDiskSize_govc_error",
returnDiskMap: map[string]float64{},
ifErr: govcErr,
wantErr: fmt.Errorf("validating hard disk size: %v", govcErr),
},
{
testName: "getHardDiskSize_empty_map_error",
returnDiskMap: map[string]float64{},
ifErr: nil,
wantErr: fmt.Errorf("no hard disks found for template: %v", "bottlerocket-kube-v1-21"),
},
{
testName: "check_disk1_wrong_size",
returnDiskMap: map[string]float64{"Hard disk 1": 100, "Hard disk 2": 20971520},
ifErr: nil,
wantErr: fmt.Errorf("Incorrect disk size for disk1 - expected: 2097152 kB got: %v", 100),
},
{
testName: "check_disk2_wrong_size",
returnDiskMap: map[string]float64{"Hard disk 1": 2097152, "Hard disk 2": 100},
ifErr: nil,
wantErr: fmt.Errorf("Incorrect disk size for disk2 - expected: 20971520 kB got: %v", 100),
},
{
testName: "check_singleDisk_wrong_size",
returnDiskMap: map[string]float64{"Hard disk 1": 100},
ifErr: nil,
wantErr: fmt.Errorf("Incorrect disk size for disk1 - expected: 23068672 kB got: %v", 100),
},
{
testName: "check_happy_flow",
returnDiskMap: map[string]float64{"Hard disk 1": 2097152, "Hard disk 2": 20971520},
ifErr: nil,
wantErr: nil,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
gt := NewWithT(t)
govc.EXPECT().GetHardDiskSize(ctx, machineConfig.Spec.Template, spec.Config.VSphereDatacenter.Spec.Datacenter).Return(tt.returnDiskMap, tt.ifErr)
err := v.validateBRHardDiskSize(ctx, &spec, &machineConfig)
if tt.wantErr == nil {
gt.Expect(err).To(BeNil())
} else {
gt.Expect(err).To(HaveOccurred())
gt.Expect(err.Error()).To(Equal(tt.wantErr.Error()))
}
})
}
}
| 413 |
eks-anywhere | aws | Go | package vsphere
import (
"bytes"
"context"
_ "embed"
"fmt"
"os"
"reflect"
"text/template"
"time"
"github.com/Masterminds/sprig"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
corev1 "k8s.io/api/core/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/govmomi"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/providers/common"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
CredentialsObjectName = "vsphere-credentials"
eksaLicense = "EKSA_LICENSE"
vSphereUsernameKey = "VSPHERE_USERNAME"
vSpherePasswordKey = "VSPHERE_PASSWORD"
vSphereServerKey = "VSPHERE_SERVER"
govcDatacenterKey = "GOVC_DATACENTER"
govcInsecure = "GOVC_INSECURE"
expClusterResourceSetKey = "EXP_CLUSTER_RESOURCE_SET"
defaultTemplateLibrary = "eks-a-templates"
defaultTemplatesFolder = "vm/Templates"
maxRetries = 30
backOffPeriod = 5 * time.Second
disk1 = "Hard disk 1"
disk2 = "Hard disk 2"
)
//go:embed config/template-cp.yaml
var defaultCAPIConfigCP string
//go:embed config/template-md.yaml
var defaultClusterConfigMD string
//go:embed config/secret.yaml
var defaultSecretObject string
var (
eksaVSphereDatacenterResourceType = fmt.Sprintf("vspheredatacenterconfigs.%s", v1alpha1.GroupVersion.Group)
eksaVSphereMachineResourceType = fmt.Sprintf("vspheremachineconfigs.%s", v1alpha1.GroupVersion.Group)
)
var requiredEnvs = []string{vSphereUsernameKey, vSpherePasswordKey, expClusterResourceSetKey}
type vsphereProvider struct {
clusterConfig *v1alpha1.Cluster
providerGovcClient ProviderGovcClient
providerKubectlClient ProviderKubectlClient
writer filewriter.FileWriter
templateBuilder *VsphereTemplateBuilder
skipIPCheck bool
Retrier *retrier.Retrier
validator *Validator
defaulter *Defaulter
ipValidator IPValidator
}
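// ProviderGovcClient defines the govc operations the vSphere provider depends on.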
type ProviderGovcClient interface {
SearchTemplate(ctx context.Context, datacenter, template string) (string, error)
LibraryElementExists(ctx context.Context, library string) (bool, error)
GetLibraryElementContentVersion(ctx context.Context, element string) (string, error)
DeleteLibraryElement(ctx context.Context, element string) error
TemplateHasSnapshot(ctx context.Context, template string) (bool, error)
GetWorkloadAvailableSpace(ctx context.Context, datastore string) (float64, error)
ValidateVCenterSetupMachineConfig(ctx context.Context, datacenterConfig *v1alpha1.VSphereDatacenterConfig, machineConfig *v1alpha1.VSphereMachineConfig, selfSigned *bool) error
ValidateVCenterConnection(ctx context.Context, server string) error
ValidateVCenterAuthentication(ctx context.Context) error
IsCertSelfSigned(ctx context.Context) bool
GetCertThumbprint(ctx context.Context) (string, error)
ConfigureCertThumbprint(ctx context.Context, server, thumbprint string) error
DatacenterExists(ctx context.Context, datacenter string) (bool, error)
NetworkExists(ctx context.Context, network string) (bool, error)
CreateLibrary(ctx context.Context, datastore, library string) error
DeployTemplateFromLibrary(ctx context.Context, templateDir, templateName, library, datacenter, datastore, network, resourcePool string, resizeDisk2 bool) error
ImportTemplate(ctx context.Context, library, ovaURL, name string) error
GetVMDiskSizeInGB(ctx context.Context, vm, datacenter string) (int, error)
GetTags(ctx context.Context, path string) (tags []string, err error)
ListTags(ctx context.Context) ([]executables.Tag, error)
CreateTag(ctx context.Context, tag, category string) error
AddTag(ctx context.Context, path, tag string) error
ListCategories(ctx context.Context) ([]string, error)
CreateCategoryForVM(ctx context.Context, name string) error
CreateUser(ctx context.Context, username string, password string) error
UserExists(ctx context.Context, username string) (bool, error)
CreateGroup(ctx context.Context, name string) error
GroupExists(ctx context.Context, name string) (bool, error)
AddUserToGroup(ctx context.Context, name string, username string) error
RoleExists(ctx context.Context, name string) (bool, error)
CreateRole(ctx context.Context, name string, privileges []string) error
SetGroupRoleOnObject(ctx context.Context, principal string, role string, object string, domain string) error
GetHardDiskSize(ctx context.Context, vm, datacenter string) (map[string]float64, error)
}
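// ProviderKubectlClient defines the kubectl operations the vSphere provider depends on.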
type ProviderKubectlClient interface {
ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error
CreateNamespaceIfNotPresent(ctx context.Context, kubeconfig string, namespace string) error
LoadSecret(ctx context.Context, secretObject string, secretObjType string, secretObjectName string, kubeConfFile string) error
GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error)
GetEksaVSphereDatacenterConfig(ctx context.Context, vsphereDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereDatacenterConfig, error)
GetEksaVSphereMachineConfig(ctx context.Context, vsphereMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereMachineConfig, error)
GetMachineDeployment(ctx context.Context, machineDeploymentName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error)
GetKubeadmControlPlane(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*controlplanev1.KubeadmControlPlane, error)
GetEtcdadmCluster(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*etcdv1.EtcdadmCluster, error)
GetSecretFromNamespace(ctx context.Context, kubeconfigFile, name, namespace string) (*corev1.Secret, error)
UpdateAnnotation(ctx context.Context, resourceType, objectName string, annotations map[string]string, opts ...executables.KubectlOpt) error
RemoveAnnotationInNamespace(ctx context.Context, resourceType, objectName, key string, cluster *types.Cluster, namespace string) error
SearchVsphereMachineConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.VSphereMachineConfig, error)
SearchVsphereDatacenterConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.VSphereDatacenterConfig, error)
SetDaemonSetImage(ctx context.Context, kubeconfigFile, name, namespace, container, image string) error
DeleteEksaDatacenterConfig(ctx context.Context, vsphereDatacenterResourceType string, vsphereDatacenterConfigName string, kubeconfigFile string, namespace string) error
DeleteEksaMachineConfig(ctx context.Context, vsphereMachineResourceType string, vsphereMachineConfigName string, kubeconfigFile string, namespace string) error
ApplyTolerationsFromTaintsToDaemonSet(ctx context.Context, oldTaints []corev1.Taint, newTaints []corev1.Taint, dsName string, kubeconfigFile string) error
}
// IPValidator is an interface that defines methods to validate the control plane IP.
type IPValidator interface {
ValidateControlPlaneIPUniqueness(cluster *v1alpha1.Cluster) error
}
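// NewProvider constructs a vSphere provider using a validator backed by the default govmomi client builder.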
func NewProvider(datacenterConfig *v1alpha1.VSphereDatacenterConfig, clusterConfig *v1alpha1.Cluster, providerGovcClient ProviderGovcClient, providerKubectlClient ProviderKubectlClient, writer filewriter.FileWriter, ipValidator IPValidator, now types.NowFunc, skipIpCheck bool) *vsphereProvider { //nolint:revive
// TODO(g-gaston): ignoring linter error for exported function returning unexported member
// We should make it exported, but that would involve a bunch of changes, so will do it separately
vcb := govmomi.NewVMOMIClientBuilder()
v := NewValidator(
providerGovcClient,
vcb,
)
return NewProviderCustomNet(
datacenterConfig,
clusterConfig,
providerGovcClient,
providerKubectlClient,
writer,
ipValidator,
now,
skipIpCheck,
v,
)
}
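// NewProviderCustomNet constructs a vSphere provider with the supplied validator.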
func NewProviderCustomNet(datacenterConfig *v1alpha1.VSphereDatacenterConfig, clusterConfig *v1alpha1.Cluster, providerGovcClient ProviderGovcClient, providerKubectlClient ProviderKubectlClient, writer filewriter.FileWriter, ipValidator IPValidator, now types.NowFunc, skipIpCheck bool, v *Validator) *vsphereProvider { //nolint:revive
// TODO(g-gaston): ignoring linter error for exported function returning unexported member
// We should make it exported, but that would involve a bunch of changes, so will do it separately
retrier := retrier.NewWithMaxRetries(maxRetries, backOffPeriod)
return &vsphereProvider{
clusterConfig: clusterConfig,
providerGovcClient: providerGovcClient,
providerKubectlClient: providerKubectlClient,
writer: writer,
templateBuilder: NewVsphereTemplateBuilder(
now,
),
skipIPCheck: skipIpCheck,
Retrier: retrier,
validator: v,
defaulter: NewDefaulter(providerGovcClient),
ipValidator: ipValidator,
}
}
func (p *vsphereProvider) UpdateKubeConfig(_ *[]byte, _ string) error {
// customize generated kube config
return nil
}
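// machineConfigsSpecChanged reports whether any machine config referenced by the existing cluster
// is missing from, or differs from, the new cluster spec.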
func (p *vsphereProvider) machineConfigsSpecChanged(ctx context.Context, cc *v1alpha1.Cluster, cluster *types.Cluster, newClusterSpec *cluster.Spec) (bool, error) {
for _, oldMcRef := range cc.MachineConfigRefs() {
existingVmc, err := p.providerKubectlClient.GetEksaVSphereMachineConfig(ctx, oldMcRef.Name, cluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
if err != nil {
return false, err
}
csmc, ok := newClusterSpec.VSphereMachineConfigs[oldMcRef.Name]
if !ok {
logger.V(3).Info(fmt.Sprintf("Old machine config spec %s not found in the existing spec", oldMcRef.Name))
return true, nil
}
if !reflect.DeepEqual(existingVmc.Spec, csmc.Spec) {
logger.V(3).Info(fmt.Sprintf("New machine config spec %s is different from the existing spec", oldMcRef.Name))
return true, nil
}
}
return false, nil
}
func (p *vsphereProvider) BootstrapClusterOpts(spec *cluster.Spec) ([]bootstrapper.BootstrapClusterOption, error) {
return common.BootstrapClusterOpts(p.clusterConfig, spec.VSphereDatacenter.Spec.Server)
}
func (p *vsphereProvider) Name() string {
return constants.VSphereProviderName
}
func (p *vsphereProvider) DatacenterResourceType() string {
return eksaVSphereDatacenterResourceType
}
func (p *vsphereProvider) MachineResourceType() string {
return eksaVSphereMachineResourceType
}
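// generateSSHKeysIfNotSet generates a single SSH key pair and assigns it to every machine config
// whose first user has no authorized key set.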
func (p *vsphereProvider) generateSSHKeysIfNotSet(machineConfigs map[string]*v1alpha1.VSphereMachineConfig) error {
var generatedKey string
for _, machineConfig := range machineConfigs {
user := machineConfig.Spec.Users[0]
if user.SshAuthorizedKeys[0] == "" {
if generatedKey != "" { // use the same key
user.SshAuthorizedKeys[0] = generatedKey
} else {
logger.Info("Provided sshAuthorizedKey is not set or is empty, auto-generating new key pair...", "vSphereMachineConfig", machineConfig.Name)
var err error
generatedKey, err = common.GenerateSSHAuthKey(p.writer)
if err != nil {
return err
}
user.SshAuthorizedKeys[0] = generatedKey
}
}
}
return nil
}
func (p *vsphereProvider) DeleteResources(ctx context.Context, clusterSpec *cluster.Spec) error {
for _, mc := range clusterSpec.VSphereMachineConfigs {
if err := p.providerKubectlClient.DeleteEksaMachineConfig(ctx, eksaVSphereMachineResourceType, mc.Name, clusterSpec.ManagementCluster.KubeconfigFile, mc.Namespace); err != nil {
return err
}
}
return p.providerKubectlClient.DeleteEksaDatacenterConfig(ctx,
eksaVSphereDatacenterResourceType,
clusterSpec.VSphereDatacenter.Name,
clusterSpec.ManagementCluster.KubeconfigFile,
clusterSpec.VSphereDatacenter.Namespace,
)
}
func (p *vsphereProvider) PostClusterDeleteValidate(_ context.Context, _ *types.Cluster) error {
// No validations
return nil
}
func (p *vsphereProvider) PostMoveManagementToBootstrap(_ context.Context, _ *types.Cluster) error {
// NOOP
return nil
}
func (p *vsphereProvider) SetupAndValidateCreateCluster(ctx context.Context, clusterSpec *cluster.Spec) error {
if err := p.validator.validateUpgradeRolloutStrategy(clusterSpec); err != nil {
return fmt.Errorf("failed setup and validations: %v", err)
}
if err := SetupEnvVars(clusterSpec.VSphereDatacenter); err != nil {
return fmt.Errorf("failed setup and validations: %v", err)
}
vSphereClusterSpec := NewSpec(clusterSpec)
if err := p.defaulter.SetDefaultsForDatacenterConfig(ctx, vSphereClusterSpec.VSphereDatacenter); err != nil {
return fmt.Errorf("failed setting default values for vsphere datacenter config: %v", err)
}
if err := vSphereClusterSpec.VSphereDatacenter.Validate(); err != nil {
return err
}
if err := p.validator.ValidateVCenterConfig(ctx, vSphereClusterSpec.VSphereDatacenter); err != nil {
return err
}
if err := p.defaulter.setDefaultsForMachineConfig(ctx, vSphereClusterSpec); err != nil {
return fmt.Errorf("failed setting default values for vsphere machine configs: %v", err)
}
if err := p.validator.ValidateClusterMachineConfigs(ctx, vSphereClusterSpec); err != nil {
return err
}
if err := p.validateDatastoreUsageForCreate(ctx, vSphereClusterSpec); err != nil {
return fmt.Errorf("validating vsphere machine configs datastore usage: %v", err)
}
if err := p.generateSSHKeysIfNotSet(clusterSpec.VSphereMachineConfigs); err != nil {
return fmt.Errorf("failed setup and validations: %v", err)
}
// TODO: move this to validator
if clusterSpec.Cluster.IsManaged() {
for _, mc := range clusterSpec.VSphereMachineConfigs {
em, err := p.providerKubectlClient.SearchVsphereMachineConfig(ctx, mc.GetName(), clusterSpec.ManagementCluster.KubeconfigFile, mc.GetNamespace())
if err != nil {
return err
}
if len(em) > 0 {
return fmt.Errorf("VSphereMachineConfig %s already exists", mc.GetName())
}
}
existingDatacenter, err := p.providerKubectlClient.SearchVsphereDatacenterConfig(ctx, clusterSpec.VSphereDatacenter.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace)
if err != nil {
return err
}
if len(existingDatacenter) > 0 {
return fmt.Errorf("VSphereDatacenter %s already exists", clusterSpec.VSphereDatacenter.Name)
}
for _, identityProviderRef := range clusterSpec.Cluster.Spec.IdentityProviderRefs {
if identityProviderRef.Kind == v1alpha1.OIDCConfigKind {
clusterSpec.OIDCConfig.SetManagedBy(p.clusterConfig.ManagedBy())
}
}
}
if !p.skipIPCheck {
if err := p.ipValidator.ValidateControlPlaneIPUniqueness(clusterSpec.Cluster); err != nil {
return err
}
} else {
logger.Info("Skipping check for whether control plane ip is in use")
}
if err := p.validator.validateVsphereUserPrivs(ctx, vSphereClusterSpec); err != nil {
return fmt.Errorf("validating vsphere user privileges: %v", err)
}
return nil
}
func (p *vsphereProvider) SetupAndValidateUpgradeCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, _ *cluster.Spec) error {
if err := p.validator.validateUpgradeRolloutStrategy(clusterSpec); err != nil {
return fmt.Errorf("failed setup and validations: %v", err)
}
if err := SetupEnvVars(clusterSpec.VSphereDatacenter); err != nil {
return fmt.Errorf("failed setup and validations: %v", err)
}
vSphereClusterSpec := NewSpec(clusterSpec)
if err := p.defaulter.SetDefaultsForDatacenterConfig(ctx, vSphereClusterSpec.VSphereDatacenter); err != nil {
return fmt.Errorf("failed setting default values for vsphere datacenter config: %v", err)
}
if err := vSphereClusterSpec.VSphereDatacenter.Validate(); err != nil {
return err
}
if err := p.validator.ValidateVCenterConfig(ctx, vSphereClusterSpec.VSphereDatacenter); err != nil {
return err
}
if err := p.defaulter.setDefaultsForMachineConfig(ctx, vSphereClusterSpec); err != nil {
return fmt.Errorf("failed setting default values for vsphere machine configs: %v", err)
}
if err := p.validator.ValidateClusterMachineConfigs(ctx, vSphereClusterSpec); err != nil {
return err
}
if err := p.validateDatastoreUsageForUpgrade(ctx, vSphereClusterSpec, cluster); err != nil {
return fmt.Errorf("validating vsphere machine configs datastore usage: %v", err)
}
if err := p.validator.validateVsphereUserPrivs(ctx, vSphereClusterSpec); err != nil {
return fmt.Errorf("validating vsphere user privileges: %v", err)
}
err := p.validateMachineConfigsNameUniqueness(ctx, cluster, clusterSpec)
if err != nil {
return fmt.Errorf("failed validate machineconfig uniqueness: %v", err)
}
return nil
}
func (p *vsphereProvider) validateMachineConfigsNameUniqueness(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
prevSpec, err := p.providerKubectlClient.GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName())
if err != nil {
return err
}
cpMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
if prevSpec.Spec.ControlPlaneConfiguration.MachineGroupRef.Name != cpMachineConfigName {
em, err := p.providerKubectlClient.SearchVsphereMachineConfig(ctx, cpMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace())
if err != nil {
return err
}
if len(em) > 0 {
return fmt.Errorf("control plane VSphereMachineConfig %s already exists", cpMachineConfigName)
}
}
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil && prevSpec.Spec.ExternalEtcdConfiguration != nil {
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
if prevSpec.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name != etcdMachineConfigName {
em, err := p.providerKubectlClient.SearchVsphereMachineConfig(ctx, etcdMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace())
if err != nil {
return err
}
if len(em) > 0 {
return fmt.Errorf("external etcd machineconfig %s already exists", etcdMachineConfigName)
}
}
}
return nil
}
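// datastoreUsage tracks the available space in a datastore against the GiB required by the machine configs that use it.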
type datastoreUsage struct {
availableSpace float64
needGiBSpace int
}
func (p *vsphereProvider) getPrevMachineConfigDatastoreUsage(ctx context.Context, machineConfig *v1alpha1.VSphereMachineConfig, cluster *types.Cluster, count int) (diskGiB float64, err error) {
em, err := p.providerKubectlClient.GetEksaVSphereMachineConfig(ctx, machineConfig.Name, cluster.KubeconfigFile, machineConfig.GetNamespace())
if err != nil {
return 0, err
}
if em != nil {
return float64(em.Spec.DiskGiB * count), nil
}
return 0, nil
}
func (p *vsphereProvider) getMachineConfigDatastoreRequirements(ctx context.Context, machineConfig *v1alpha1.VSphereMachineConfig, count int) (available float64, need int, err error) {
availableSpace, err := p.providerGovcClient.GetWorkloadAvailableSpace(ctx, machineConfig.Spec.Datastore) // TODO: remove dependency on machineConfig
if err != nil {
return 0, 0, fmt.Errorf("getting datastore details: %v", err)
}
needGiB := machineConfig.Spec.DiskGiB * count
return availableSpace, needGiB, nil
}
func (p *vsphereProvider) calculateDatastoreUsage(ctx context.Context, machineConfig *v1alpha1.VSphereMachineConfig, cluster *types.Cluster, usage map[string]*datastoreUsage, prevCount, newCount int) error {
availableSpace, needGiB, err := p.getMachineConfigDatastoreRequirements(ctx, machineConfig, newCount)
if err != nil {
return err
}
prevUsage, err := p.getPrevMachineConfigDatastoreUsage(ctx, machineConfig, cluster, prevCount)
if err != nil {
return err
}
availableSpace += prevUsage
updateDatastoreUsageMap(machineConfig, needGiB, availableSpace, prevUsage, usage)
return nil
}
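// updateDatastoreUsageMap adds the space required by a machine config to the usage entry for its datastore,
// creating the entry if one does not exist yet.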
func updateDatastoreUsageMap(machineConfig *v1alpha1.VSphereMachineConfig, needGiB int, availableSpace, prevUsage float64, usage map[string]*datastoreUsage) {
if _, ok := usage[machineConfig.Spec.Datastore]; ok {
usage[machineConfig.Spec.Datastore].needGiBSpace += needGiB
usage[machineConfig.Spec.Datastore].availableSpace += prevUsage
} else {
usage[machineConfig.Spec.Datastore] = &datastoreUsage{
availableSpace: availableSpace,
needGiBSpace: needGiB,
}
}
}
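// validateDatastoreUsageForUpgrade checks that every datastore referenced by the control plane, worker and
// external etcd machine configs has enough free space for the upgraded cluster, crediting back the space
// already used by the existing machines.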
func (p *vsphereProvider) validateDatastoreUsageForUpgrade(ctx context.Context, currentClusterSpec *Spec, cluster *types.Cluster) error {
usage := make(map[string]*datastoreUsage)
prevEksaCluster, err := p.providerKubectlClient.GetEksaCluster(ctx, cluster, currentClusterSpec.Cluster.GetName())
if err != nil {
return err
}
cpMachineConfig := currentClusterSpec.controlPlaneMachineConfig()
if err := p.calculateDatastoreUsage(ctx, cpMachineConfig, cluster, usage, prevEksaCluster.Spec.ControlPlaneConfiguration.Count, currentClusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count); err != nil {
return fmt.Errorf("calculating datastore usage: %v", err)
}
prevMachineConfigRefs := machineRefSliceToMap(prevEksaCluster.MachineConfigRefs())
for _, workerNodeGroupConfiguration := range currentClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
prevCount := 0
workerMachineConfig := currentClusterSpec.workerMachineConfig(workerNodeGroupConfiguration)
if _, ok := prevMachineConfigRefs[workerNodeGroupConfiguration.MachineGroupRef.Name]; ok {
prevCount = *workerNodeGroupConfiguration.Count
}
if err := p.calculateDatastoreUsage(ctx, workerMachineConfig, cluster, usage, prevCount, *workerNodeGroupConfiguration.Count); err != nil {
return fmt.Errorf("calculating datastore usage: %v", err)
}
}
etcdMachineConfig := currentClusterSpec.etcdMachineConfig()
if etcdMachineConfig != nil {
if err := p.calculateDatastoreUsage(ctx, etcdMachineConfig, cluster, usage, prevEksaCluster.Spec.ExternalEtcdConfiguration.Count, currentClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count); err != nil {
return fmt.Errorf("calculating datastore usage: %v", err)
}
}
for datastore, usage := range usage {
if float64(usage.needGiBSpace) > usage.availableSpace {
return fmt.Errorf("not enough space in datastore %v for given diskGiB and count for respective machine groups", datastore)
}
}
return nil
}
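// validateDatastoreUsageForCreate checks that every datastore referenced by the machine configs has enough
// free space for the requested number of machines.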
func (p *vsphereProvider) validateDatastoreUsageForCreate(ctx context.Context, vsphereClusterSpec *Spec) error {
usage := make(map[string]*datastoreUsage)
cpMachineConfig := vsphereClusterSpec.controlPlaneMachineConfig()
controlPlaneAvailableSpace, controlPlaneNeedGiB, err := p.getMachineConfigDatastoreRequirements(ctx, cpMachineConfig, vsphereClusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count)
if err != nil {
return err
}
updateDatastoreUsageMap(cpMachineConfig, controlPlaneNeedGiB, controlPlaneAvailableSpace, 0, usage)
for _, workerNodeGroupConfiguration := range vsphereClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
workerMachineConfig := vsphereClusterSpec.workerMachineConfig(workerNodeGroupConfiguration)
workerAvailableSpace, workerNeedGiB, err := p.getMachineConfigDatastoreRequirements(ctx, workerMachineConfig, *workerNodeGroupConfiguration.Count)
if err != nil {
return err
}
updateDatastoreUsageMap(workerMachineConfig, workerNeedGiB, workerAvailableSpace, 0, usage)
}
etcdMachineConfig := vsphereClusterSpec.etcdMachineConfig()
if etcdMachineConfig != nil {
etcdAvailableSpace, etcdNeedGiB, err := p.getMachineConfigDatastoreRequirements(ctx, etcdMachineConfig, vsphereClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count)
if err != nil {
return err
}
updateDatastoreUsageMap(etcdMachineConfig, etcdNeedGiB, etcdAvailableSpace, 0, usage)
}
for datastore, usage := range usage {
if float64(usage.needGiBSpace) > usage.availableSpace {
return fmt.Errorf("not enough space in datastore %v for given diskGiB and count for respective machine groups", datastore)
}
}
return nil
}
func (p *vsphereProvider) UpdateSecrets(ctx context.Context, cluster *types.Cluster, _ *cluster.Spec) error {
var contents bytes.Buffer
err := p.createSecret(ctx, cluster, &contents)
if err != nil {
return err
}
err = p.providerKubectlClient.ApplyKubeSpecFromBytes(ctx, cluster, contents.Bytes())
if err != nil {
return fmt.Errorf("loading secrets object: %v", err)
}
return nil
}
func (p *vsphereProvider) SetupAndValidateDeleteCluster(ctx context.Context, _ *types.Cluster, spec *cluster.Spec) error {
if err := p.validator.validateUpgradeRolloutStrategy(spec); err != nil {
return fmt.Errorf("failed setup and validations: %v", err)
}
if err := SetupEnvVars(spec.VSphereDatacenter); err != nil {
return fmt.Errorf("failed setup and validations: %v", err)
}
return nil
}
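// NeedsNewControlPlaneTemplate returns true when the Kubernetes version, bundle number or any immutable
// vSphere field changed, requiring a new control plane machine template.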
func NeedsNewControlPlaneTemplate(oldSpec, newSpec *cluster.Spec, oldVdc, newVdc *v1alpha1.VSphereDatacenterConfig, oldVmc, newVmc *v1alpha1.VSphereMachineConfig) bool {
// Another option is to generate MachineTemplates based on the old and new eksa spec,
// remove the name field and compare them with DeepEqual
// We plan to approach this way since it's more flexible to add/remove fields and test out for validation
if oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion {
return true
}
if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number {
return true
}
return AnyImmutableFieldChanged(oldVdc, newVdc, oldVmc, newVmc)
}
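// NeedsNewWorkloadTemplate returns true when the Kubernetes version, bundle number, worker node group
// taints or labels, or any immutable vSphere field changed.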
func NeedsNewWorkloadTemplate(oldSpec, newSpec *cluster.Spec, oldVdc, newVdc *v1alpha1.VSphereDatacenterConfig, oldVmc, newVmc *v1alpha1.VSphereMachineConfig) bool {
if oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion {
return true
}
if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number {
return true
}
if !v1alpha1.WorkerNodeGroupConfigurationSliceTaintsEqual(oldSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newSpec.Cluster.Spec.WorkerNodeGroupConfigurations) ||
!v1alpha1.WorkerNodeGroupConfigurationsLabelsMapEqual(oldSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newSpec.Cluster.Spec.WorkerNodeGroupConfigurations) {
return true
}
return AnyImmutableFieldChanged(oldVdc, newVdc, oldVmc, newVmc)
}
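// NeedsNewKubeadmConfigTemplate returns true when the worker node group taints, labels or machine config users changed.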
func NeedsNewKubeadmConfigTemplate(newWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeVmc *v1alpha1.VSphereMachineConfig, newWorkerNodeVmc *v1alpha1.VSphereMachineConfig) bool {
return !v1alpha1.TaintsSliceEqual(newWorkerNodeGroup.Taints, oldWorkerNodeGroup.Taints) || !v1alpha1.MapEqual(newWorkerNodeGroup.Labels, oldWorkerNodeGroup.Labels) ||
!v1alpha1.UsersSliceEqual(oldWorkerNodeVmc.Spec.Users, newWorkerNodeVmc.Spec.Users)
}
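// NeedsNewEtcdTemplate returns true when the Kubernetes version, bundle number or any immutable vSphere
// field changed, requiring a new etcd machine template.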
func NeedsNewEtcdTemplate(oldSpec, newSpec *cluster.Spec, oldVdc, newVdc *v1alpha1.VSphereDatacenterConfig, oldVmc, newVmc *v1alpha1.VSphereMachineConfig) bool {
if oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion {
return true
}
if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number {
return true
}
return AnyImmutableFieldChanged(oldVdc, newVdc, oldVmc, newVmc)
}
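// AnyImmutableFieldChanged reports whether any field that requires machine replacement (CPU, memory, disk,
// datastore, folder, network, resource pool, thumbprint or template) differs between the old and new configs.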
func AnyImmutableFieldChanged(oldVdc, newVdc *v1alpha1.VSphereDatacenterConfig, oldVmc, newVmc *v1alpha1.VSphereMachineConfig) bool {
if oldVmc.Spec.NumCPUs != newVmc.Spec.NumCPUs {
return true
}
if oldVmc.Spec.MemoryMiB != newVmc.Spec.MemoryMiB {
return true
}
if oldVmc.Spec.DiskGiB != newVmc.Spec.DiskGiB {
return true
}
if oldVmc.Spec.Datastore != newVmc.Spec.Datastore {
return true
}
if oldVmc.Spec.Folder != newVmc.Spec.Folder {
return true
}
if oldVdc.Spec.Network != newVdc.Spec.Network {
return true
}
if oldVmc.Spec.ResourcePool != newVmc.Spec.ResourcePool {
return true
}
if oldVdc.Spec.Thumbprint != newVdc.Spec.Thumbprint {
return true
}
if oldVmc.Spec.Template != newVmc.Spec.Template {
return true
}
return false
}
func (p *vsphereProvider) generateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, workloadCluster *types.Cluster, currentSpec, newClusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
clusterName := newClusterSpec.Cluster.Name
var controlPlaneTemplateName, workloadTemplateName, kubeadmconfigTemplateName, etcdTemplateName string
var needsNewEtcdTemplate bool
c, err := p.providerKubectlClient.GetEksaCluster(ctx, workloadCluster, newClusterSpec.Cluster.Name)
if err != nil {
return nil, nil, err
}
vdc, err := p.providerKubectlClient.GetEksaVSphereDatacenterConfig(ctx, newClusterSpec.VSphereDatacenter.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
if err != nil {
return nil, nil, err
}
controlPlaneMachineConfig := newClusterSpec.VSphereMachineConfigs[newClusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
controlPlaneVmc, err := p.providerKubectlClient.GetEksaVSphereMachineConfig(ctx, c.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
if err != nil {
return nil, nil, err
}
needsNewControlPlaneTemplate := NeedsNewControlPlaneTemplate(currentSpec, newClusterSpec, vdc, newClusterSpec.VSphereDatacenter, controlPlaneVmc, controlPlaneMachineConfig)
if !needsNewControlPlaneTemplate {
cp, err := p.providerKubectlClient.GetKubeadmControlPlane(ctx, workloadCluster, c.Name, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, nil, err
}
controlPlaneTemplateName = cp.Spec.MachineTemplate.InfrastructureRef.Name
} else {
controlPlaneTemplateName = common.CPMachineTemplateName(clusterName, p.templateBuilder.now)
}
previousWorkerNodeGroupConfigs := cluster.BuildMapForWorkerNodeGroupsByName(currentSpec.Cluster.Spec.WorkerNodeGroupConfigurations)
workloadTemplateNames := make(map[string]string, len(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
kubeadmconfigTemplateNames := make(map[string]string, len(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
for _, workerNodeGroupConfiguration := range newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
oldWorkerNodeVmc, newWorkerNodeVmc, err := p.getWorkerNodeMachineConfigs(ctx, workloadCluster, newClusterSpec, workerNodeGroupConfiguration, previousWorkerNodeGroupConfigs)
if err != nil {
return nil, nil, err
}
needsNewWorkloadTemplate, err := p.needsNewMachineTemplate(currentSpec, newClusterSpec, workerNodeGroupConfiguration, vdc, previousWorkerNodeGroupConfigs, oldWorkerNodeVmc, newWorkerNodeVmc)
if err != nil {
return nil, nil, err
}
needsNewKubeadmConfigTemplate, err := p.needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration, previousWorkerNodeGroupConfigs, oldWorkerNodeVmc, newWorkerNodeVmc)
if err != nil {
return nil, nil, err
}
if !needsNewKubeadmConfigTemplate {
mdName := machineDeploymentName(newClusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name)
md, err := p.providerKubectlClient.GetMachineDeployment(ctx, mdName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, nil, err
}
kubeadmconfigTemplateName = md.Spec.Template.Spec.Bootstrap.ConfigRef.Name
kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = kubeadmconfigTemplateName
} else {
kubeadmconfigTemplateName = common.KubeadmConfigTemplateName(clusterName, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = kubeadmconfigTemplateName
}
if !needsNewWorkloadTemplate {
mdName := machineDeploymentName(newClusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name)
md, err := p.providerKubectlClient.GetMachineDeployment(ctx, mdName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, nil, err
}
workloadTemplateName = md.Spec.Template.Spec.InfrastructureRef.Name
workloadTemplateNames[workerNodeGroupConfiguration.Name] = workloadTemplateName
} else {
workloadTemplateName = common.WorkerMachineTemplateName(clusterName, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
workloadTemplateNames[workerNodeGroupConfiguration.Name] = workloadTemplateName
}
}
if newClusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
etcdMachineConfig := newClusterSpec.VSphereMachineConfigs[newClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name]
etcdMachineVmc, err := p.providerKubectlClient.GetEksaVSphereMachineConfig(ctx, c.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
if err != nil {
return nil, nil, err
}
needsNewEtcdTemplate = NeedsNewEtcdTemplate(currentSpec, newClusterSpec, vdc, newClusterSpec.VSphereDatacenter, etcdMachineVmc, etcdMachineConfig)
if !needsNewEtcdTemplate {
etcdadmCluster, err := p.providerKubectlClient.GetEtcdadmCluster(ctx, workloadCluster, clusterName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, nil, err
}
etcdTemplateName = etcdadmCluster.Spec.InfrastructureTemplate.Name
} else {
/* During a cluster upgrade, etcd machines need to be upgraded first, so that the etcd machines with new spec get created and can be used by controlplane machines
as etcd endpoints. KCP rollout should not start until then. As a temporary solution in the absence of static etcd endpoints, we annotate the etcd cluster as "upgrading",
so that KCP checks this annotation and does not proceed if etcd cluster is upgrading. The etcdadm controller removes this annotation once the etcd upgrade is complete.
*/
err = p.providerKubectlClient.UpdateAnnotation(ctx, "etcdadmcluster", fmt.Sprintf("%s-etcd", clusterName),
map[string]string{etcdv1.UpgradeInProgressAnnotation: "true"},
executables.WithCluster(bootstrapCluster),
executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, nil, err
}
etcdTemplateName = common.EtcdMachineTemplateName(clusterName, p.templateBuilder.now)
}
}
cpOpt := func(values map[string]interface{}) {
values["controlPlaneTemplateName"] = controlPlaneTemplateName
values["etcdTemplateName"] = etcdTemplateName
}
controlPlaneSpec, err = p.templateBuilder.GenerateCAPISpecControlPlane(newClusterSpec, cpOpt)
if err != nil {
return nil, nil, err
}
workersSpec, err = p.templateBuilder.GenerateCAPISpecWorkers(newClusterSpec, workloadTemplateNames, kubeadmconfigTemplateNames)
if err != nil {
return nil, nil, err
}
return controlPlaneSpec, workersSpec, nil
}
func (p *vsphereProvider) generateCAPISpecForCreate(ctx context.Context, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
clusterName := clusterSpec.Cluster.Name
cpOpt := func(values map[string]interface{}) {
values["controlPlaneTemplateName"] = common.CPMachineTemplateName(clusterName, p.templateBuilder.now)
values["etcdTemplateName"] = common.EtcdMachineTemplateName(clusterName, p.templateBuilder.now)
}
controlPlaneSpec, err = p.templateBuilder.GenerateCAPISpecControlPlane(clusterSpec, cpOpt)
if err != nil {
return nil, nil, err
}
// TODO(g-gaston): update this to use the new method CAPIWorkersSpecWithInitialNames.
// That implies moving to monotonically increasing names instead of based on timestamp.
// Upgrades should also be moved to that naming scheme for consistency. That requires bigger changes.
workloadTemplateNames := make(map[string]string, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
kubeadmconfigTemplateNames := make(map[string]string, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
workloadTemplateNames[workerNodeGroupConfiguration.Name] = common.WorkerMachineTemplateName(clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = common.KubeadmConfigTemplateName(clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
}
workersSpec, err = p.templateBuilder.GenerateCAPISpecWorkers(clusterSpec, workloadTemplateNames, kubeadmconfigTemplateNames)
if err != nil {
return nil, nil, err
}
return controlPlaneSpec, workersSpec, nil
}
func (p *vsphereProvider) GenerateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, workloadCluster *types.Cluster, currentSpec, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
controlPlaneSpec, workersSpec, err = p.generateCAPISpecForUpgrade(ctx, bootstrapCluster, workloadCluster, currentSpec, clusterSpec)
if err != nil {
return nil, nil, fmt.Errorf("generating cluster api spec contents: %v", err)
}
return controlPlaneSpec, workersSpec, nil
}
func (p *vsphereProvider) GenerateCAPISpecForCreate(ctx context.Context, _ *types.Cluster, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
controlPlaneSpec, workersSpec, err = p.generateCAPISpecForCreate(ctx, clusterSpec)
if err != nil {
return nil, nil, fmt.Errorf("generating cluster api spec contents: %v", err)
}
return controlPlaneSpec, workersSpec, nil
}
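// createSecret renders the embedded secret template with the vSphere and cloud provider credentials
// and writes the result to contents.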
func (p *vsphereProvider) createSecret(ctx context.Context, cluster *types.Cluster, contents *bytes.Buffer) error {
t, err := template.New("tmpl").Funcs(sprig.TxtFuncMap()).Parse(defaultSecretObject)
if err != nil {
return fmt.Errorf("creating secret object template: %v", err)
}
vuc := config.NewVsphereUserConfig()
values := map[string]string{
"vspherePassword": os.Getenv(vSpherePasswordKey),
"vsphereUsername": os.Getenv(vSphereUsernameKey),
"eksaCloudProviderUsername": vuc.EksaVsphereCPUsername,
"eksaCloudProviderPassword": vuc.EksaVsphereCPPassword,
"eksaLicense": os.Getenv(eksaLicense),
"eksaSystemNamespace": constants.EksaSystemNamespace,
"vsphereCredentialsName": constants.VSphereCredentialsName,
"eksaLicenseName": constants.EksaLicenseName,
}
err = t.Execute(contents, values)
if err != nil {
return fmt.Errorf("substituting values for secret object template: %v", err)
}
return nil
}
func (p *vsphereProvider) PreCAPIInstallOnBootstrap(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
return nil
}
func (p *vsphereProvider) PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
return nil
}
func (p *vsphereProvider) PostBootstrapSetupUpgrade(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
return nil
}
func (p *vsphereProvider) PostWorkloadInit(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
return nil
}
func (p *vsphereProvider) Version(clusterSpec *cluster.Spec) string {
return clusterSpec.VersionsBundle.VSphere.Version
}
func (p *vsphereProvider) EnvMap(_ *cluster.Spec) (map[string]string, error) {
envMap := make(map[string]string)
for _, key := range requiredEnvs {
if env, ok := os.LookupEnv(key); ok && len(env) > 0 {
envMap[key] = env
} else {
return envMap, fmt.Errorf("warning required env not set %s", key)
}
}
return envMap, nil
}
func (p *vsphereProvider) GetDeployments() map[string][]string {
return map[string][]string{
"capv-system": {"capv-controller-manager"},
}
}
func (p *vsphereProvider) GetInfrastructureBundle(clusterSpec *cluster.Spec) *types.InfrastructureBundle {
bundle := clusterSpec.VersionsBundle
folderName := fmt.Sprintf("infrastructure-vsphere/%s/", bundle.VSphere.Version)
infraBundle := types.InfrastructureBundle{
FolderName: folderName,
Manifests: []releasev1alpha1.Manifest{
bundle.VSphere.Components,
bundle.VSphere.Metadata,
bundle.VSphere.ClusterTemplate,
},
}
return &infraBundle
}
func (p *vsphereProvider) DatacenterConfig(spec *cluster.Spec) providers.DatacenterConfig {
return spec.VSphereDatacenter
}
func (p *vsphereProvider) MachineConfigs(spec *cluster.Spec) []providers.MachineConfig {
annotateMachineConfig(
spec,
spec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name,
spec.Cluster.ControlPlaneAnnotation(),
"true",
)
if spec.Cluster.Spec.ExternalEtcdConfiguration != nil {
annotateMachineConfig(
spec,
spec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name,
spec.Cluster.EtcdAnnotation(),
"true",
)
}
for _, workerNodeGroupConfiguration := range p.clusterConfig.Spec.WorkerNodeGroupConfigurations {
setMachineConfigManagedBy(
spec,
workerNodeGroupConfiguration.MachineGroupRef.Name,
)
}
machineConfigs := make([]providers.MachineConfig, 0, len(spec.VSphereMachineConfigs))
for _, m := range spec.VSphereMachineConfigs {
machineConfigs = append(machineConfigs, m)
}
return machineConfigs
}
func annotateMachineConfig(spec *cluster.Spec, machineConfigName, annotationKey, annotationValue string) {
machineConfig := spec.VSphereMachineConfigs[machineConfigName]
if machineConfig.Annotations == nil {
machineConfig.Annotations = make(map[string]string, 1)
}
machineConfig.Annotations[annotationKey] = annotationValue
setMachineConfigManagedBy(spec, machineConfigName)
}
func setMachineConfigManagedBy(spec *cluster.Spec, machineConfigName string) {
machineConfig := spec.VSphereMachineConfigs[machineConfigName]
if machineConfig.Annotations == nil {
machineConfig.Annotations = make(map[string]string, 1)
}
if spec.Cluster.IsManaged() {
machineConfig.SetManagedBy(spec.Cluster.ManagedBy())
}
}
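// ValidateNewSpec compares the new cluster spec against the spec currently deployed on the cluster and rejects changes to immutable fields (server, datacenter, network, machine config settings) as well as changed vSphere credentials.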
func (p *vsphereProvider) ValidateNewSpec(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
prevSpec, err := p.providerKubectlClient.GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name)
if err != nil {
return err
}
prevDatacenter, err := p.providerKubectlClient.GetEksaVSphereDatacenterConfig(ctx, prevSpec.Spec.DatacenterRef.Name, cluster.KubeconfigFile, prevSpec.Namespace)
if err != nil {
return err
}
datacenter := clusterSpec.VSphereDatacenter
oSpec := prevDatacenter.Spec
nSpec := datacenter.Spec
prevMachineConfigRefs := machineRefSliceToMap(prevSpec.MachineConfigRefs())
for _, machineConfigRef := range clusterSpec.Cluster.MachineConfigRefs() {
machineConfig, ok := clusterSpec.VSphereMachineConfigs[machineConfigRef.Name]
if !ok {
return fmt.Errorf("cannot find machine config %s in vsphere provider machine configs", machineConfigRef.Name)
}
if _, ok = prevMachineConfigRefs[machineConfig.Name]; ok {
err = p.validateMachineConfigImmutability(ctx, cluster, machineConfig, clusterSpec)
if err != nil {
return err
}
}
}
if nSpec.Server != oSpec.Server {
return fmt.Errorf("spec.server is immutable. Previous value %s, new value %s", oSpec.Server, nSpec.Server)
}
if nSpec.Datacenter != oSpec.Datacenter {
return fmt.Errorf("spec.datacenter is immutable. Previous value %s, new value %s", oSpec.Datacenter, nSpec.Datacenter)
}
if nSpec.Network != oSpec.Network {
return fmt.Errorf("spec.network is immutable. Previous value %s, new value %s", oSpec.Network, nSpec.Network)
}
secretChanged, err := p.secretContentsChanged(ctx, cluster)
if err != nil {
return err
}
if secretChanged {
return fmt.Errorf("the VSphere credentials derived from %s and %s are immutable; please use the same credentials for the upgraded cluster", vSpherePasswordKey, vSphereUsernameKey)
}
return nil
}
func (p *vsphereProvider) getWorkerNodeMachineConfigs(ctx context.Context, workloadCluster *types.Cluster, newClusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration) (*v1alpha1.VSphereMachineConfig, *v1alpha1.VSphereMachineConfig, error) {
if oldWorkerNodeGroup, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
newWorkerMachineConfig := newClusterSpec.VSphereMachineConfigs[workerNodeGroupConfiguration.MachineGroupRef.Name]
oldWorkerMachineConfig, err := p.providerKubectlClient.GetEksaVSphereMachineConfig(ctx, oldWorkerNodeGroup.MachineGroupRef.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
if err != nil {
return nil, newWorkerMachineConfig, err
}
return oldWorkerMachineConfig, newWorkerMachineConfig, nil
}
return nil, nil, nil
}
func (p *vsphereProvider) needsNewMachineTemplate(currentSpec, newClusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, vdc *v1alpha1.VSphereDatacenterConfig, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerMachineConfig *v1alpha1.VSphereMachineConfig, newWorkerMachineConfig *v1alpha1.VSphereMachineConfig) (bool, error) {
if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
needsNewWorkloadTemplate := NeedsNewWorkloadTemplate(currentSpec, newClusterSpec, vdc, newClusterSpec.VSphereDatacenter, oldWorkerMachineConfig, newWorkerMachineConfig)
return needsNewWorkloadTemplate, nil
}
return true, nil
}
func (p *vsphereProvider) needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeVmc *v1alpha1.VSphereMachineConfig, newWorkerNodeVmc *v1alpha1.VSphereMachineConfig) (bool, error) {
if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
existingWorkerNodeGroupConfig := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]
return NeedsNewKubeadmConfigTemplate(&workerNodeGroupConfiguration, &existingWorkerNodeGroupConfig, oldWorkerNodeVmc, newWorkerNodeVmc), nil
}
return true, nil
}
func (p *vsphereProvider) validateMachineConfigImmutability(ctx context.Context, cluster *types.Cluster, newConfig *v1alpha1.VSphereMachineConfig, clusterSpec *cluster.Spec) error {
prevMachineConfig, err := p.providerKubectlClient.GetEksaVSphereMachineConfig(ctx, newConfig.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace)
if err != nil {
return err
}
if newConfig.Spec.StoragePolicyName != prevMachineConfig.Spec.StoragePolicyName {
return fmt.Errorf("spec.storagePolicyName is immutable. Previous value %s, new value %s", prevMachineConfig.Spec.StoragePolicyName, newConfig.Spec.StoragePolicyName)
}
if newConfig.Spec.OSFamily != prevMachineConfig.Spec.OSFamily {
return fmt.Errorf("spec.osFamily is immutable. Previous value %v, new value %v", prevMachineConfig.Spec.OSFamily, newConfig.Spec.OSFamily)
}
return nil
}
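// secretContentsChanged reports whether the vSphere credentials in the environment differ from those stored in the workload cluster's credentials secret.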
func (p *vsphereProvider) secretContentsChanged(ctx context.Context, workloadCluster *types.Cluster) (bool, error) {
nPassword := os.Getenv(vSpherePasswordKey)
oSecret, err := p.providerKubectlClient.GetSecretFromNamespace(ctx, workloadCluster.KubeconfigFile, CredentialsObjectName, constants.EksaSystemNamespace)
if err != nil {
return false, fmt.Errorf("obtaining VSphere secret %s from workload cluster: %v", CredentialsObjectName, err)
}
if string(oSecret.Data["password"]) != nPassword {
return true, nil
}
nUser := os.Getenv(vSphereUsernameKey)
if string(oSecret.Data["username"]) != nUser {
return true, nil
}
return false, nil
}
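// ChangeDiff returns the vSphere component version change between the current and new specs, or nil if the version is unchanged.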
func (p *vsphereProvider) ChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ComponentChangeDiff {
if currentSpec.VersionsBundle.VSphere.Version == newSpec.VersionsBundle.VSphere.Version {
return nil
}
return &types.ComponentChangeDiff{
ComponentName: constants.VSphereProviderName,
NewVersion: newSpec.VersionsBundle.VSphere.Version,
OldVersion: currentSpec.VersionsBundle.VSphere.Version,
}
}
func (p *vsphereProvider) RunPostControlPlaneUpgrade(_ context.Context, _ *cluster.Spec, _ *cluster.Spec, _ *types.Cluster, _ *types.Cluster) error {
return nil
}
func cpiResourceSetName(clusterSpec *cluster.Spec) string {
return fmt.Sprintf("%s-cpi", clusterSpec.Cluster.Name)
}
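// UpgradeNeeded reports whether the CAPV component images, the datacenter config, or any machine config spec changed between the current and new cluster specs.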
func (p *vsphereProvider) UpgradeNeeded(ctx context.Context, newSpec, currentSpec *cluster.Spec, cluster *types.Cluster) (bool, error) {
newV, oldV := newSpec.VersionsBundle.VSphere, currentSpec.VersionsBundle.VSphere
if newV.Manager.ImageDigest != oldV.Manager.ImageDigest ||
newV.KubeVip.ImageDigest != oldV.KubeVip.ImageDigest {
return true, nil
}
cc := currentSpec.Cluster
existingVdc, err := p.providerKubectlClient.GetEksaVSphereDatacenterConfig(ctx, cc.Spec.DatacenterRef.Name, cluster.KubeconfigFile, newSpec.Cluster.Namespace)
if err != nil {
return false, err
}
if !reflect.DeepEqual(existingVdc.Spec, newSpec.VSphereDatacenter.Spec) {
		logger.V(3).Info("New datacenter spec is different from the existing spec")
return true, nil
}
machineConfigsSpecChanged, err := p.machineConfigsSpecChanged(ctx, cc, cluster, newSpec)
if err != nil {
return false, err
}
return machineConfigsSpecChanged, nil
}
func machineRefSliceToMap(machineRefs []v1alpha1.Ref) map[string]v1alpha1.Ref {
refMap := make(map[string]v1alpha1.Ref, len(machineRefs))
for _, ref := range machineRefs {
refMap[ref.Name] = ref
}
return refMap
}
func machineDeploymentName(clusterName, nodeGroupName string) string {
return fmt.Sprintf("%s-%s", clusterName, nodeGroupName)
}
func (p *vsphereProvider) InstallCustomProviderComponents(ctx context.Context, kubeconfigFile string) error {
return nil
}
func (p *vsphereProvider) PostBootstrapDeleteForUpgrade(ctx context.Context) error {
return nil
}
// PreCoreComponentsUpgrade satisfies the Provider interface.
func (p *vsphereProvider) PreCoreComponentsUpgrade(
ctx context.Context,
cluster *types.Cluster,
clusterSpec *cluster.Spec,
) error {
return nil
}
| 1,132 |
eks-anywhere | aws | Go | package vsphere
import (
"bytes"
"context"
_ "embed"
"encoding/json"
"errors"
"fmt"
"math"
"os"
"path"
"strings"
"testing"
"text/template"
"time"
"github.com/Masterminds/sprig"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/executables"
mockswriter "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
"github.com/aws/eks-anywhere/pkg/govmomi"
govmomi_mocks "github.com/aws/eks-anywhere/pkg/govmomi/mocks"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/mocks"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
testClusterConfigMainFilename = "cluster_main.yaml"
testClusterConfigMain121Filename = "cluster_main_121.yaml"
testClusterConfigMain121CPOnlyFilename = "cluster_main_121_cp_only.yaml"
testClusterConfigWithCPUpgradeStrategy = "cluster_main_121_cp_upgrade_strategy.yaml"
testClusterConfigWithMDUpgradeStrategy = "cluster_main_121_md_upgrade_strategy.yaml"
testDataDir = "testdata"
expectedVSphereName = "vsphere"
expectedVSphereUsername = "vsphere_username"
expectedVSpherePassword = "vsphere_password"
expectedVSphereServer = "vsphere_server"
expectedExpClusterResourceSet = "expClusterResourceSetKey"
eksd119Release = "kubernetes-1-19-eks-4"
eksd119ReleaseTag = "eksdRelease:kubernetes-1-19-eks-4"
eksd121ReleaseTag = "eksdRelease:kubernetes-1-21-eks-4"
ubuntuOSTag = "os:ubuntu"
bottlerocketOSTag = "os:bottlerocket"
testTemplate = "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6"
)
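// DummyProviderGovcClient is a stub ProviderGovcClient that returns canned successful responses for tests.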
type DummyProviderGovcClient struct {
osTag string
}
func NewDummyProviderGovcClient() *DummyProviderGovcClient {
return &DummyProviderGovcClient{osTag: ubuntuOSTag}
}
func (pc *DummyProviderGovcClient) TemplateHasSnapshot(ctx context.Context, template string) (bool, error) {
return true, nil
}
func (pc *DummyProviderGovcClient) GetWorkloadAvailableSpace(ctx context.Context, datastore string) (float64, error) {
return math.MaxFloat64, nil
}
func (pc *DummyProviderGovcClient) DeployTemplate(ctx context.Context, datacenterConfig *v1alpha1.VSphereDatacenterConfig) error {
return nil
}
func (pc *DummyProviderGovcClient) ValidateVCenterConnection(ctx context.Context, server string) error {
return nil
}
func (pc *DummyProviderGovcClient) ValidateVCenterAuthentication(ctx context.Context) error {
return nil
}
func (pc *DummyProviderGovcClient) IsCertSelfSigned(ctx context.Context) bool {
return false
}
func (pc *DummyProviderGovcClient) GetCertThumbprint(ctx context.Context) (string, error) {
return "", nil
}
func (pc *DummyProviderGovcClient) ConfigureCertThumbprint(ctx context.Context, server, thumbprint string) error {
return nil
}
func (pc *DummyProviderGovcClient) DatacenterExists(ctx context.Context, datacenter string) (bool, error) {
return true, nil
}
func (pc *DummyProviderGovcClient) NetworkExists(ctx context.Context, network string) (bool, error) {
return true, nil
}
func (pc *DummyProviderGovcClient) ValidateVCenterSetupMachineConfig(ctx context.Context, datacenterConfig *v1alpha1.VSphereDatacenterConfig, machineConfig *v1alpha1.VSphereMachineConfig, selfSigned *bool) error {
return nil
}
func (pc *DummyProviderGovcClient) SearchTemplate(ctx context.Context, datacenter, template string) (string, error) {
return template, nil
}
func (pc *DummyProviderGovcClient) LibraryElementExists(ctx context.Context, library string) (bool, error) {
return true, nil
}
func (pc *DummyProviderGovcClient) GetLibraryElementContentVersion(ctx context.Context, element string) (string, error) {
return "", nil
}
func (pc *DummyProviderGovcClient) DeleteLibraryElement(ctx context.Context, element string) error {
return nil
}
func (pc *DummyProviderGovcClient) CreateLibrary(ctx context.Context, datastore, library string) error {
return nil
}
func (pc *DummyProviderGovcClient) DeployTemplateFromLibrary(ctx context.Context, templateDir, templateName, library, datacenter, datastore, network, resourcePool string, resizeDisk2 bool) error {
return nil
}
func (pc *DummyProviderGovcClient) ResizeDisk(ctx context.Context, template, diskName string, diskSizeInGB int) error {
return nil
}
func (pc *DummyProviderGovcClient) ImportTemplate(ctx context.Context, library, ovaURL, name string) error {
return nil
}
func (pc *DummyProviderGovcClient) GetVMDiskSizeInGB(ctx context.Context, vm, datacenter string) (int, error) {
return 25, nil
}
func (pc *DummyProviderGovcClient) GetHardDiskSize(ctx context.Context, vm, datacenter string) (map[string]float64, error) {
return map[string]float64{"Hard disk 1": 23068672}, nil
}
func (pc *DummyProviderGovcClient) GetTags(ctx context.Context, path string) (tags []string, err error) {
return []string{eksd119ReleaseTag, eksd121ReleaseTag, pc.osTag}, nil
}
func (pc *DummyProviderGovcClient) ListTags(ctx context.Context) ([]executables.Tag, error) {
return nil, nil
}
func (pc *DummyProviderGovcClient) CreateTag(ctx context.Context, tag, category string) error {
return nil
}
func (pc *DummyProviderGovcClient) AddTag(ctx context.Context, path, tag string) error {
return nil
}
func (pc *DummyProviderGovcClient) ListCategories(ctx context.Context) ([]string, error) {
return nil, nil
}
func (pc *DummyProviderGovcClient) CreateCategoryForVM(ctx context.Context, name string) error {
return nil
}
func (pc *DummyProviderGovcClient) AddUserToGroup(ctx context.Context, name string, username string) error {
return nil
}
func (pc *DummyProviderGovcClient) CreateGroup(ctx context.Context, name string) error {
return nil
}
func (pc *DummyProviderGovcClient) CreateRole(ctx context.Context, name string, privileges []string) error {
return nil
}
func (pc *DummyProviderGovcClient) CreateUser(ctx context.Context, username string, password string) error {
return nil
}
func (pc *DummyProviderGovcClient) UserExists(ctx context.Context, username string) (bool, error) {
return true, nil
}
func (pc *DummyProviderGovcClient) GroupExists(ctx context.Context, name string) (bool, error) {
return true, nil
}
func (pc *DummyProviderGovcClient) RoleExists(ctx context.Context, name string) (bool, error) {
return false, nil
}
func (pc *DummyProviderGovcClient) SetGroupRoleOnObject(ctx context.Context, principal string, role string, object string, domain string) error {
return nil
}
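// givenClusterConfig loads the EKS-A Cluster object from the named testdata file.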
func givenClusterConfig(t *testing.T, fileName string) *v1alpha1.Cluster {
return givenClusterSpec(t, fileName).Cluster
}
func givenClusterSpec(t *testing.T, fileName string) *cluster.Spec {
return test.NewFullClusterSpec(t, path.Join(testDataDir, fileName))
}
func givenEmptyClusterSpec() *cluster.Spec {
return test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.KubeVersion = "1.19"
s.VersionsBundle.EksD.Name = eksd119Release
s.Cluster.Namespace = "test-namespace"
s.VSphereDatacenter = &v1alpha1.VSphereDatacenterConfig{}
})
}
func givenDatacenterConfig(t *testing.T, fileName string) *v1alpha1.VSphereDatacenterConfig {
datacenterConfig, err := v1alpha1.GetVSphereDatacenterConfig(path.Join(testDataDir, fileName))
if err != nil {
t.Fatalf("unable to get datacenter config from file: %v", err)
}
return datacenterConfig
}
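// givenProvider builds a vsphereProvider from the main testdata cluster config using a dummy govc client and a mocked IP validator.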
func givenProvider(t *testing.T) *vsphereProvider {
mockCtrl := gomock.NewController(t)
clusterConfig := givenClusterConfig(t, testClusterConfigMainFilename)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider := newProviderWithKubectl(
t,
datacenterConfig,
clusterConfig,
nil,
ipValidator,
)
if provider == nil {
t.Fatalf("provider object is nil")
}
return provider
}
func workerNodeGroup1MachineDeployment() *clusterv1.MachineDeployment {
return &clusterv1.MachineDeployment{
Spec: clusterv1.MachineDeploymentSpec{
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
Name: "test-md-0-template-1234567890000",
},
},
},
},
},
}
}
func workerNodeGroup2MachineDeployment() *clusterv1.MachineDeployment {
return &clusterv1.MachineDeployment{
Spec: clusterv1.MachineDeploymentSpec{
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
Name: "test-md-1-template-1234567890000",
},
},
},
},
},
}
}
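// setupContext sets the vSphere credential, server, and cluster resource set environment variables expected by the provider for the duration of the test.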
func setupContext(t *testing.T) {
t.Setenv(config.EksavSphereUsernameKey, expectedVSphereUsername)
t.Setenv(vSphereUsernameKey, os.Getenv(config.EksavSphereUsernameKey))
t.Setenv(config.EksavSpherePasswordKey, expectedVSpherePassword)
t.Setenv(vSpherePasswordKey, os.Getenv(config.EksavSpherePasswordKey))
t.Setenv(vSphereServerKey, expectedVSphereServer)
t.Setenv(expClusterResourceSetKey, expectedExpClusterResourceSet)
}
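// providerTest bundles the provider under test together with its mocked collaborators and the cluster spec shared across provider tests.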
type providerTest struct {
*WithT
t *testing.T
ctx context.Context
managementCluster, workloadCluster *types.Cluster
provider *vsphereProvider
cluster *v1alpha1.Cluster
clusterSpec *cluster.Spec
datacenterConfig *v1alpha1.VSphereDatacenterConfig
machineConfigs map[string]*v1alpha1.VSphereMachineConfig
kubectl *mocks.MockProviderKubectlClient
govc *mocks.MockProviderGovcClient
clientBuilder *mockVSphereClientBuilder
ipValidator *mocks.MockIPValidator
}
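// newProviderTest wires up a providerTest with fresh mocks and the main testdata cluster spec.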
func newProviderTest(t *testing.T) *providerTest {
setupContext(t)
ctrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(ctrl)
govc := mocks.NewMockProviderGovcClient(ctrl)
vscb, _ := newMockVSphereClientBuilder(ctrl)
ipValidator := mocks.NewMockIPValidator(ctrl)
spec := givenClusterSpec(t, testClusterConfigMainFilename)
p := &providerTest{
t: t,
WithT: NewWithT(t),
ctx: context.Background(),
managementCluster: &types.Cluster{
Name: "m-cluster",
KubeconfigFile: "kubeconfig-m.kubeconfig",
},
workloadCluster: &types.Cluster{
Name: "test",
KubeconfigFile: "kubeconfig-w.kubeconfig",
},
cluster: spec.Cluster,
clusterSpec: spec,
datacenterConfig: spec.VSphereDatacenter,
machineConfigs: spec.VSphereMachineConfigs,
kubectl: kubectl,
govc: govc,
clientBuilder: vscb,
ipValidator: ipValidator,
}
p.buildNewProvider()
return p
}
func (tt *providerTest) setExpectationsForDefaultDiskAndCloneModeGovcCalls() {
for _, m := range tt.machineConfigs {
tt.govc.EXPECT().GetVMDiskSizeInGB(tt.ctx, m.Spec.Template, tt.datacenterConfig.Spec.Datacenter).Return(25, nil)
tt.govc.EXPECT().TemplateHasSnapshot(tt.ctx, m.Spec.Template).Return(true, nil)
}
}
func (tt *providerTest) setExpectationForVCenterValidation() {
tt.govc.EXPECT().IsCertSelfSigned(tt.ctx).Return(false)
tt.govc.EXPECT().DatacenterExists(tt.ctx, tt.datacenterConfig.Spec.Datacenter).Return(true, nil)
tt.govc.EXPECT().NetworkExists(tt.ctx, tt.datacenterConfig.Spec.Network).Return(true, nil)
}
func (tt *providerTest) setExpectationForSetup() {
tt.govc.EXPECT().ValidateVCenterConnection(tt.ctx, tt.datacenterConfig.Spec.Server).Return(nil)
tt.govc.EXPECT().ValidateVCenterAuthentication(tt.ctx).Return(nil)
tt.govc.EXPECT().ConfigureCertThumbprint(tt.ctx, tt.datacenterConfig.Spec.Server, tt.datacenterConfig.Spec.Thumbprint).Return(nil)
}
func (tt *providerTest) setExpectationsForMachineConfigsVCenterValidation() {
for _, m := range tt.machineConfigs {
var b bool
tt.govc.EXPECT().ValidateVCenterSetupMachineConfig(tt.ctx, tt.datacenterConfig, m, &b).Return(nil)
}
}
func (tt *providerTest) buildNewProvider() {
tt.provider = newProvider(
tt.t,
tt.clusterSpec.VSphereDatacenter,
tt.clusterSpec.Cluster,
tt.govc,
tt.kubectl,
NewValidator(tt.govc, tt.clientBuilder),
tt.ipValidator,
)
}
func TestNewProvider(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterConfig := givenClusterConfig(t, testClusterConfigMainFilename)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
govc := NewDummyProviderGovcClient()
ipValidator := mocks.NewMockIPValidator(mockCtrl)
_, writer := test.NewWriter(t)
skipIPCheck := true
provider := NewProvider(
datacenterConfig,
clusterConfig,
govc,
kubectl,
writer,
ipValidator,
time.Now,
skipIPCheck,
)
if provider == nil {
t.Fatalf("provider object is nil")
}
if provider.validator == nil {
t.Fatalf("validator not configured")
}
}
func TestNewProviderCustomNet(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterConfig := givenClusterConfig(t, testClusterConfigMainFilename)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider := newProviderWithKubectl(
t,
datacenterConfig,
clusterConfig,
kubectl,
ipValidator,
)
if provider == nil {
t.Fatalf("provider object is nil")
}
}
func newProviderWithKubectl(t *testing.T, datacenterConfig *v1alpha1.VSphereDatacenterConfig, clusterConfig *v1alpha1.Cluster, kubectl ProviderKubectlClient, ipValidator IPValidator) *vsphereProvider {
ctrl := gomock.NewController(t)
govc := NewDummyProviderGovcClient()
vscb, _ := newMockVSphereClientBuilder(ctrl)
v := NewValidator(govc, vscb)
return newProvider(
t,
datacenterConfig,
clusterConfig,
govc,
kubectl,
v,
ipValidator,
)
}
func newProviderWithGovc(t *testing.T, datacenterConfig *v1alpha1.VSphereDatacenterConfig, clusterConfig *v1alpha1.Cluster, govc ProviderGovcClient) *vsphereProvider {
ctrl := gomock.NewController(t)
vscb, _ := newMockVSphereClientBuilder(ctrl)
v := NewValidator(govc, vscb)
kubectl := mocks.NewMockProviderKubectlClient(ctrl)
ipValidator := mocks.NewMockIPValidator(ctrl)
return newProvider(
t,
datacenterConfig,
clusterConfig,
govc,
kubectl,
v,
ipValidator,
)
}
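// mockVSphereClientBuilder returns the same mocked VSphereClient from every Build call.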
type mockVSphereClientBuilder struct {
vsc *govmomi_mocks.MockVSphereClient
}
func (mvscb *mockVSphereClientBuilder) Build(ctx context.Context, host string, username string, password string, insecure bool, datacenter string) (govmomi.VSphereClient, error) {
return mvscb.vsc, nil
}
func setDefaultVSphereClientMock(vsc *govmomi_mocks.MockVSphereClient) error {
vsc.EXPECT().Username().Return("foobar").AnyTimes()
var privs []string
err := json.Unmarshal([]byte(config.VSphereAdminPrivsFile), &privs)
if err != nil {
return err
}
vsc.EXPECT().GetPrivsOnEntity(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(privs, nil).AnyTimes()
return nil
}
func newMockVSphereClientBuilder(ctrl *gomock.Controller) (*mockVSphereClientBuilder, error) {
vsc := govmomi_mocks.NewMockVSphereClient(ctrl)
err := setDefaultVSphereClientMock(vsc)
mvscb := mockVSphereClientBuilder{vsc}
return &mvscb, err
}
func newProvider(t *testing.T, datacenterConfig *v1alpha1.VSphereDatacenterConfig, clusterConfig *v1alpha1.Cluster, govc ProviderGovcClient, kubectl ProviderKubectlClient, v *Validator, ipValidator IPValidator) *vsphereProvider {
_, writer := test.NewWriter(t)
return NewProviderCustomNet(
datacenterConfig,
clusterConfig,
govc,
kubectl,
writer,
ipValidator,
test.FakeNow,
false,
v,
)
}
func TestProviderGenerateCAPISpecForUpgradeUpdateMachineTemplate(t *testing.T) {
tests := []struct {
testName string
clusterconfigFile string
wantCPFile string
wantMDFile string
}{
{
testName: "minimal",
clusterconfigFile: "cluster_minimal.yaml",
wantCPFile: "testdata/expected_results_minimal_cp.yaml",
wantMDFile: "testdata/expected_results_minimal_md.yaml",
},
{
testName: "minimal_autoscaler",
clusterconfigFile: "cluster_minimal_autoscaling.yaml",
wantCPFile: "testdata/expected_results_minimal_cp.yaml",
wantMDFile: "testdata/expected_results_minimal_autoscaling_md.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
bootstrapCluster := &types.Cluster{
Name: "bootstrap-test",
}
clusterSpec := givenClusterSpec(t, tt.clusterconfigFile)
vsphereDatacenter := &v1alpha1.VSphereDatacenterConfig{
Spec: v1alpha1.VSphereDatacenterConfigSpec{},
}
vsphereMachineConfig := firstMachineConfig(clusterSpec).DeepCopy()
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
kubectl.EXPECT().GetMachineDeployment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(workerNodeGroup1MachineDeployment(), nil)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereDatacenter, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil)
datacenterConfig := givenDatacenterConfig(t, tt.clusterconfigFile)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, clusterSpec.DeepCopy())
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), tt.wantCPFile)
test.AssertContentToFile(t, string(md), tt.wantMDFile)
})
}
}
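// firstMachineConfig returns an arbitrary machine config from the spec; map iteration order makes the choice non-deterministic when there is more than one.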
func firstMachineConfig(spec *cluster.Spec) *v1alpha1.VSphereMachineConfig {
var mc *v1alpha1.VSphereMachineConfig
for _, m := range spec.VSphereMachineConfigs {
mc = m
break
}
return mc
}
func getMachineConfig(spec *cluster.Spec, name string) *v1alpha1.VSphereMachineConfig {
if mc, ok := spec.VSphereMachineConfigs[name]; ok {
return mc
}
return nil
}
func TestProviderGenerateCAPISpecForUpgradeOIDC(t *testing.T) {
tests := []struct {
testName string
clusterconfigFile string
wantCPFile string
}{
{
testName: "with minimal oidc",
clusterconfigFile: "cluster_minimal_oidc.yaml",
wantCPFile: "testdata/expected_results_minimal_oidc_cp.yaml",
},
{
testName: "with full oidc",
clusterconfigFile: "cluster_full_oidc.yaml",
wantCPFile: "testdata/expected_results_full_oidc_cp.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
bootstrapCluster := &types.Cluster{
Name: "bootstrap-test",
}
clusterSpec := givenClusterSpec(t, tt.clusterconfigFile)
vsphereDatacenter := &v1alpha1.VSphereDatacenterConfig{
Spec: v1alpha1.VSphereDatacenterConfigSpec{},
}
vsphereMachineConfig := firstMachineConfig(clusterSpec).DeepCopy()
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
kubectl.EXPECT().GetMachineDeployment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(workerNodeGroup1MachineDeployment(), nil)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereDatacenter, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil)
datacenterConfig := givenDatacenterConfig(t, tt.clusterconfigFile)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, _, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, clusterSpec.DeepCopy())
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), tt.wantCPFile)
})
}
}
func TestProviderGenerateCAPISpecForUpgradeMultipleWorkerNodeGroups(t *testing.T) {
tests := []struct {
testName string
clusterconfigFile string
wantMDFile string
}{
{
testName: "adding a worker node group",
clusterconfigFile: "cluster_main_multiple_worker_node_groups.yaml",
wantMDFile: "testdata/expected_results_minimal_add_worker_node_group.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
bootstrapCluster := &types.Cluster{
Name: "bootstrap-test",
}
clusterSpec := givenClusterSpec(t, tt.clusterconfigFile)
vsphereDatacenter := &v1alpha1.VSphereDatacenterConfig{
Spec: v1alpha1.VSphereDatacenterConfigSpec{},
}
vsphereMachineConfig := firstMachineConfig(clusterSpec).DeepCopy()
newClusterSpec := givenClusterSpec(t, tt.clusterconfigFile)
newConfig := v1alpha1.WorkerNodeGroupConfiguration{Count: ptr.Int(1), MachineGroupRef: &v1alpha1.Ref{Name: "test-wn", Kind: "VSphereMachineConfig"}, Name: "md-2"}
newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations = append(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newConfig)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
kubectl.EXPECT().GetMachineDeployment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(workerNodeGroup1MachineDeployment(), nil)
kubectl.EXPECT().GetMachineDeployment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(workerNodeGroup2MachineDeployment(), nil)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereDatacenter, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil).AnyTimes()
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil)
kubectl.EXPECT().UpdateAnnotation(ctx, "etcdadmcluster", fmt.Sprintf("%s-etcd", cluster.Name), map[string]string{etcdv1.UpgradeInProgressAnnotation: "true"}, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster)))
datacenterConfig := givenDatacenterConfig(t, tt.clusterconfigFile)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
_, md, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, newClusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(md), tt.wantMDFile)
})
}
}
func TestProviderGenerateCAPISpecForUpgradeUpdateMachineTemplateExternalEtcd(t *testing.T) {
tests := []struct {
testName string
clusterconfigFile string
wantCPFile string
wantMDFile string
}{
{
testName: "main",
clusterconfigFile: testClusterConfigMainFilename,
wantCPFile: "testdata/expected_results_main_cp.yaml",
wantMDFile: "testdata/expected_results_main_md.yaml",
},
{
testName: "main_with_taints",
clusterconfigFile: "cluster_main_with_taints.yaml",
wantCPFile: "testdata/expected_results_main_with_taints_cp.yaml",
wantMDFile: "testdata/expected_results_main_with_taints_md.yaml",
},
{
testName: "main with node labels",
clusterconfigFile: "cluster_main_with_node_labels.yaml",
wantCPFile: "testdata/expected_results_main_cp.yaml",
wantMDFile: "testdata/expected_results_main_node_labels_md.yaml",
},
{
testName: "main with cp node labels",
clusterconfigFile: "cluster_main_with_cp_node_labels.yaml",
wantCPFile: "testdata/expected_results_main_node_labels_cp.yaml",
wantMDFile: "testdata/expected_results_main_md.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
bootstrapCluster := &types.Cluster{
Name: "bootstrap-test",
}
clusterSpec := givenClusterSpec(t, tt.clusterconfigFile)
vsphereDatacenter := &v1alpha1.VSphereDatacenterConfig{
Spec: v1alpha1.VSphereDatacenterConfigSpec{},
}
vsphereMachineConfig := firstMachineConfig(clusterSpec).DeepCopy()
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
kubectl.EXPECT().GetMachineDeployment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(workerNodeGroup1MachineDeployment(), nil)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereDatacenter, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil)
kubectl.EXPECT().UpdateAnnotation(ctx, "etcdadmcluster", fmt.Sprintf("%s-etcd", cluster.Name), map[string]string{etcdv1.UpgradeInProgressAnnotation: "true"}, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster)))
datacenterConfig := givenDatacenterConfig(t, tt.clusterconfigFile)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), tt.wantCPFile)
test.AssertContentToFile(t, string(md), tt.wantMDFile)
})
}
}
func TestProviderGenerateCAPISpecForUpgradeNotUpdateMachineTemplate(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
bootstrapCluster := &types.Cluster{
Name: "bootstrap-test",
}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
oldCP := &controlplanev1.KubeadmControlPlane{
Spec: controlplanev1.KubeadmControlPlaneSpec{
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: v1.ObjectReference{
Name: "test-control-plane-template-original",
},
},
},
}
oldMD := &clusterv1.MachineDeployment{
Spec: clusterv1.MachineDeploymentSpec{
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
InfrastructureRef: v1.ObjectReference{
Name: "test-md-0-original",
},
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
Name: "test-md-0-template-original",
},
},
},
},
},
}
etcdadmCluster := &etcdv1.EtcdadmCluster{
Spec: etcdv1.EtcdadmClusterSpec{
InfrastructureTemplate: v1.ObjectReference{
Name: "test-etcd-template-original",
},
},
}
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
machineDeploymentName := fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Name)
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(datacenterConfig, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, controlPlaneMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName], nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, workerNodeMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName], nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, etcdMachineConfigName, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereMachineConfigs[etcdMachineConfigName], nil)
kubectl.EXPECT().GetKubeadmControlPlane(ctx, cluster, clusterSpec.Cluster.Name, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(oldCP, nil)
kubectl.EXPECT().GetMachineDeployment(ctx, machineDeploymentName, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(oldMD, nil).Times(2)
kubectl.EXPECT().GetEtcdadmCluster(ctx, cluster, clusterSpec.Cluster.Name, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(etcdadmCluster, nil)
cp, md, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, clusterSpec.DeepCopy())
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_main_no_machinetemplate_update_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_main_no_machinetemplate_update_md.yaml")
}
func TestProviderGenerateCAPISpecForCreate(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_main_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_main_md.yaml")
}
func TestProviderGenerateCAPISpecForCreateWithControlPlaneTags(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
datacenterConfig := clusterSpec.VSphereDatacenter
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_main_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_main_md.yaml")
}
func TestProviderGenerateCAPISpecForCreateWithMultipleWorkerNodeGroups(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
clusterSpec := givenClusterSpec(t, "cluster_main_multiple_worker_node_groups.yaml")
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
datacenterConfig := givenDatacenterConfig(t, "cluster_main_multiple_worker_node_groups.yaml")
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
_, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(md), "testdata/expected_results_main_multiple_worker_node_groups.yaml")
}
func TestProviderGenerateCAPISpecForUpgradeUpdateMachineGroupRefName(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
bootstrapCluster := &types.Cluster{
Name: "bootstrap-test",
}
clusterSpec := givenClusterSpec(t, "cluster_main.yaml")
vsphereDatacenter := &v1alpha1.VSphereDatacenterConfig{
Spec: v1alpha1.VSphereDatacenterConfigSpec{},
}
vsphereMachineConfig := firstMachineConfig(clusterSpec).DeepCopy()
wnMachineConfig := getMachineConfig(clusterSpec, "test-wn")
newClusterSpec := clusterSpec.DeepCopy()
newMachineConfigName := "new-test-wn"
newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name = newMachineConfigName
newWorkerMachineConfig := wnMachineConfig.DeepCopy()
newWorkerMachineConfig.Name = newMachineConfigName
newClusterSpec.VSphereMachineConfigs[newMachineConfigName] = newWorkerMachineConfig
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
kubectl.EXPECT().GetMachineDeployment(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(workerNodeGroup1MachineDeployment(), nil)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(ctx, cluster.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereDatacenter, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(wnMachineConfig, nil).AnyTimes()
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(vsphereMachineConfig, nil)
kubectl.EXPECT().UpdateAnnotation(ctx, "etcdadmcluster", fmt.Sprintf("%s-etcd", cluster.Name), map[string]string{etcdv1.UpgradeInProgressAnnotation: "true"}, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster)))
datacenterConfig := givenDatacenterConfig(t, "cluster_main.yaml")
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
provider.templateBuilder.now = test.NewFakeNow
_, md, err := provider.GenerateCAPISpecForUpgrade(context.Background(), bootstrapCluster, cluster, clusterSpec, newClusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(md), "testdata/expected_results_main_md_update_machine_template.yaml")
}
func TestProviderGenerateCAPISpecForCreateWithBottlerocketAndExternalEtcd(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_external_etcd.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
vscb, _ := newMockVSphereClientBuilder(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
v := NewValidator(govc, vscb)
govc.osTag = bottlerocketOSTag
provider := newProvider(
t,
datacenterConfig,
clusterSpec.Cluster,
govc,
kubectl,
v,
ipValidator,
)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_external_etcd_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_external_etcd_md.yaml")
}
func TestProviderGenerateDeploymentFileForBottleRocketWithMirrorConfig(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_mirror_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
vscb, _ := newMockVSphereClientBuilder(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
v := NewValidator(govc, vscb)
govc.osTag = bottlerocketOSTag
provider := newProvider(
t,
datacenterConfig,
clusterSpec.Cluster,
govc,
kubectl,
v,
ipValidator,
)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_mirror_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_mirror_config_md.yaml")
}
func TestProviderGenerateDeploymentFileForBottleRocketWithMirrorAndCertConfig(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_mirror_with_cert_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
govc.osTag = bottlerocketOSTag
vscb, _ := newMockVSphereClientBuilder(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
v := NewValidator(govc, vscb)
provider := newProvider(
t,
datacenterConfig,
clusterSpec.Cluster,
govc,
kubectl,
v,
ipValidator,
)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_mirror_config_with_cert_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_mirror_config_with_cert_md.yaml")
}
func TestProviderGenerateDeploymentFileForBottleRocketWithMirrorAuthConfig(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_mirror_with_auth_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
govc.osTag = bottlerocketOSTag
vscb, _ := newMockVSphereClientBuilder(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
v := NewValidator(govc, vscb)
provider := newProvider(
t,
datacenterConfig,
clusterSpec.Cluster,
govc,
kubectl,
v,
ipValidator,
)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_mirror_config_with_auth_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_mirror_config_with_auth_md.yaml")
}
func TestProviderGenerateDeploymentFileWithMirrorConfig(t *testing.T) {
clusterSpecManifest := "cluster_mirror_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
govc.osTag = ubuntuOSTag
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_mirror_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_config_md.yaml")
}
func TestProviderGenerateDeploymentFileWithMirrorAndCertConfig(t *testing.T) {
clusterSpecManifest := "cluster_mirror_with_cert_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
govc.osTag = ubuntuOSTag
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_mirror_config_with_cert_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_config_with_cert_md.yaml")
}
func TestProviderGenerateDeploymentFileWithMirrorAuth(t *testing.T) {
clusterSpecManifest := "cluster_mirror_with_auth_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
if err := os.Setenv("REGISTRY_USERNAME", "username"); err != nil {
t.Fatalf(err.Error())
}
if err := os.Setenv("REGISTRY_PASSWORD", "password"); err != nil {
t.Fatalf(err.Error())
}
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
govc.osTag = ubuntuOSTag
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_mirror_with_auth_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_with_auth_config_md.yaml")
}
func TestUpdateKubeConfig(t *testing.T) {
provider := givenProvider(t)
content := []byte{}
err := provider.UpdateKubeConfig(&content, "clusterName")
if err != nil {
t.Fatalf("failed UpdateKubeConfig: %v", err)
}
}
func TestBootstrapClusterOpts(t *testing.T) {
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
bootstrapClusterOps, err := provider.BootstrapClusterOpts(clusterSpec)
if err != nil {
t.Fatalf("failed BootstrapClusterOpts: %v", err)
}
if bootstrapClusterOps == nil {
t.Fatalf("expected BootstrapClusterOpts")
}
}
func TestName(t *testing.T) {
provider := givenProvider(t)
if provider.Name() != expectedVSphereName {
t.Fatalf("unexpected Name %s!=%s", provider.Name(), expectedVSphereName)
}
}
func TestSetupAndValidateCreateCluster(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
provider.ipValidator = ipValidator
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
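// thenErrorPrefixExpected fails the test unless err is non-nil and its message
// starts with the expected prefix.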
func thenErrorPrefixExpected(t *testing.T, expected string, err error) {
if err == nil {
t.Fatalf("Expected=<%s> actual=<nil>", expected)
}
actual := err.Error()
if !strings.HasPrefix(actual, expected) {
t.Fatalf("Expected=<%s...> actual=<%s...>", expected, actual)
}
}
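// thenErrorExpected fails the test unless err is non-nil and its message
// matches expected exactly.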
func thenErrorExpected(t *testing.T, expected string, err error) {
if err == nil {
t.Fatalf("Expected=<%s> actual=<nil>", expected)
}
actual := err.Error()
if expected != actual {
t.Fatalf("Expected=<%s> actual=<%s>", expected, actual)
}
}
func TestSetupAndValidateCreateClusterNoUsername(t *testing.T) {
ctx := context.Background()
clusterSpec := givenEmptyClusterSpec()
provider := givenProvider(t)
setupContext(t)
os.Unsetenv(config.EksavSphereUsernameKey)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "failed setup and validations: EKSA_VSPHERE_USERNAME is not set or is empty", err)
}
func TestSetupAndValidateCreateClusterNoPassword(t *testing.T) {
ctx := context.Background()
clusterSpec := givenEmptyClusterSpec()
provider := givenProvider(t)
setupContext(t)
os.Unsetenv(config.EksavSpherePasswordKey)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "failed setup and validations: EKSA_VSPHERE_PASSWORD is not set or is empty", err)
}
func TestSetupAndValidateCreateCPUpgradeRolloutStrategy(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigWithCPUpgradeStrategy)
provider := givenProvider(t)
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "failed setup and validations: Upgrade rollout strategy customization is not supported for vSphere provider", err)
}
func TestSetupAndValidateCreateMDUpgradeRolloutStrategy(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigWithMDUpgradeStrategy)
provider := givenProvider(t)
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "failed setup and validations: Upgrade rollout strategy customization is not supported for vSphere provider", err)
}
func TestSetupAndValidateUpgradeCPUpgradeRolloutStrategy(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigWithCPUpgradeStrategy)
cluster := &types.Cluster{}
provider := givenProvider(t)
setupContext(t)
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
thenErrorExpected(t, "failed setup and validations: Upgrade rollout strategy customization is not supported for vSphere provider", err)
}
func TestSetupAndValidateUpgradeMDUpgradeRolloutStrategy(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigWithMDUpgradeStrategy)
cluster := &types.Cluster{}
provider := givenProvider(t)
setupContext(t)
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
thenErrorExpected(t, "failed setup and validations: Upgrade rollout strategy customization is not supported for vSphere provider", err)
}
func TestSetupAndValidateDeleteCPUpgradeRolloutStrategy(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigWithCPUpgradeStrategy)
provider := givenProvider(t)
tt := newProviderTest(t)
setupContext(t)
err := provider.SetupAndValidateDeleteCluster(ctx, tt.managementCluster, clusterSpec)
thenErrorExpected(t, "failed setup and validations: Upgrade rollout strategy customization is not supported for vSphere provider", err)
}
func TestSetupAndValidateDeleteMDUpgradeRolloutStrategy(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigWithMDUpgradeStrategy)
provider := givenProvider(t)
tt := newProviderTest(t)
setupContext(t)
err := provider.SetupAndValidateDeleteCluster(ctx, tt.managementCluster, clusterSpec)
thenErrorExpected(t, "failed setup and validations: Upgrade rollout strategy customization is not supported for vSphere provider", err)
}
func TestSetupAndValidateCreateWorkloadClusterSuccess(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
setupContext(t)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
for _, config := range clusterSpec.VSphereMachineConfigs {
kubectl.EXPECT().SearchVsphereMachineConfig(context.TODO(), config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.VSphereMachineConfig{}, nil)
}
kubectl.EXPECT().SearchVsphereDatacenterConfig(context.TODO(), datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.VSphereDatacenterConfig{}, nil)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
assert.NoError(t, err, "No error should be returned")
}
func TestSetupAndValidateCreateWorkloadClusterFailsIfMachineExists(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
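	// Map iteration order is not deterministic, so only whichever machine config is
	// visited first is mocked to already exist; lookups for the remaining configs may
	// or may not happen before validation fails, hence MaxTimes(1) below.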
idx := 0
var existingMachine string
for _, config := range clusterSpec.VSphereMachineConfigs {
if idx == 0 {
kubectl.EXPECT().SearchVsphereMachineConfig(context.TODO(), config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.VSphereMachineConfig{config}, nil)
existingMachine = config.Name
} else {
kubectl.EXPECT().SearchVsphereMachineConfig(context.TODO(), config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.VSphereMachineConfig{}, nil).MaxTimes(1)
}
idx++
}
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, fmt.Sprintf("VSphereMachineConfig %s already exists", existingMachine), err)
}
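// TestSetupAndValidateSelfManagedClusterSkipMachineNameValidateSuccess verifies that, for a
// self-managed cluster (no managedBy set), the existing VSphereMachineConfig search against
// the management cluster is skipped entirely.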
func TestSetupAndValidateSelfManagedClusterSkipMachineNameValidateSuccess(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
kubectl.EXPECT().SearchVsphereMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
assert.NoError(t, err, "No error should be returned")
}
func TestSetupAndValidateCreateWorkloadClusterFailsIfDatacenterExists(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
setupContext(t)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
clusterSpec.Cluster.SetManagedBy("management-cluster")
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
for _, config := range clusterSpec.VSphereMachineConfigs {
kubectl.EXPECT().SearchVsphereMachineConfig(context.TODO(), config.Name, clusterSpec.ManagementCluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.VSphereMachineConfig{}, nil)
}
kubectl.EXPECT().SearchVsphereDatacenterConfig(context.TODO(), datacenterConfig.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return([]*v1alpha1.VSphereDatacenterConfig{datacenterConfig}, nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, fmt.Sprintf("VSphereDatacenter %s already exists", datacenterConfig.Name), err)
}
func TestSetupAndValidateSelfManagedClusterSkipDatacenterNameValidateSuccess(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
clusterSpec.ManagementCluster = &types.Cluster{
Name: "management-cluster",
KubeconfigFile: "kc.kubeconfig",
ExistingManagement: true,
}
kubectl.EXPECT().SearchVsphereMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
kubectl.EXPECT().SearchVsphereDatacenterConfig(context.TODO(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
assert.NoError(t, err, "No error should be returned")
}
func TestSetupAndValidateDeleteCluster(t *testing.T) {
tt := newProviderTest(t)
tt.Expect(
tt.provider.SetupAndValidateDeleteCluster(tt.ctx, tt.managementCluster, tt.clusterSpec),
).To(Succeed())
}
func TestSetupAndValidateDeleteClusterNoPassword(t *testing.T) {
tt := newProviderTest(t)
os.Unsetenv(config.EksavSpherePasswordKey)
err := tt.provider.SetupAndValidateDeleteCluster(tt.ctx, tt.managementCluster, tt.clusterSpec)
thenErrorExpected(t, "failed setup and validations: EKSA_VSPHERE_PASSWORD is not set or is empty", err)
}
func TestSetupAndValidateUpgradeCluster(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
cluster := &types.Cluster{}
provider := givenProvider(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
setupContext(t)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil).Times(2)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Times(3)
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestSetupAndValidateUpgradeClusterNoUsername(t *testing.T) {
ctx := context.Background()
clusterSpec := givenEmptyClusterSpec()
provider := givenProvider(t)
setupContext(t)
os.Unsetenv(config.EksavSphereUsernameKey)
cluster := &types.Cluster{}
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
thenErrorExpected(t, "failed setup and validations: EKSA_VSPHERE_USERNAME is not set or is empty", err)
}
func TestSetupAndValidateUpgradeClusterNoPassword(t *testing.T) {
ctx := context.Background()
clusterSpec := givenEmptyClusterSpec()
provider := givenProvider(t)
setupContext(t)
os.Unsetenv(config.EksavSpherePasswordKey)
cluster := &types.Cluster{}
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
thenErrorExpected(t, "failed setup and validations: EKSA_VSPHERE_PASSWORD is not set or is empty", err)
}
func TestSetupAndValidateUpgradeClusterDatastoreUsageError(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
cluster := &types.Cluster{}
provider := givenProvider(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
setupContext(t)
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Return(nil, fmt.Errorf("error"))
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
thenErrorExpected(t, "validating vsphere machine configs datastore usage: calculating datastore usage: error", err)
}
func TestSetupAndValidateUpgradeClusterCPSshNotExists(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
cluster := &types.Cluster{}
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil).Times(2)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Times(3)
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestSetupAndValidateUpgradeClusterWorkerSshNotExists(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
cluster := &types.Cluster{}
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil).Times(2)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Times(3)
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestSetupAndValidateUpgradeClusterEtcdSshNotExists(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
cluster := &types.Cluster{}
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil).Times(2)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Times(3)
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
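// TestSetupAndValidateUpgradeClusterSameMachineConfigforCPandEtcd verifies that upgrade validation
// succeeds when the control plane and external etcd share the same machine config.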
func TestSetupAndValidateUpgradeClusterSameMachineConfigforCPandEtcd(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
etcdMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name = etcdMachineConfigName
setupContext(t)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
cluster := &types.Cluster{}
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil).Times(2)
for _, mc := range clusterSpec.VSphereMachineConfigs {
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Return(mc, nil)
}
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestVersion(t *testing.T) {
vSphereProviderVersion := "v0.7.10"
provider := givenProvider(t)
clusterSpec := givenEmptyClusterSpec()
clusterSpec.VersionsBundle.VSphere.Version = vSphereProviderVersion
setupContext(t)
result := provider.Version(clusterSpec)
if result != vSphereProviderVersion {
t.Fatalf("Unexpected version expected <%s> actual=<%s>", vSphereProviderVersion, result)
}
}
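// TestProviderBootstrapSetup renders the default vSphere credentials secret template with the
// expected values and then runs PostBootstrapSetup for the cluster.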
func TestProviderBootstrapSetup(t *testing.T) {
ctx := context.Background()
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
clusterConfig := givenClusterConfig(t, testClusterConfigMainFilename)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider := newProviderWithKubectl(t, datacenterConfig, clusterConfig, kubectl, ipValidator)
cluster := types.Cluster{
Name: "test",
KubeconfigFile: "",
}
values := map[string]string{
"clusterName": clusterConfig.Name,
"vspherePassword": expectedVSphereUsername,
"vsphereUsername": expectedVSpherePassword,
"eksaCloudProviderUsername": expectedVSphereUsername,
"eksaCloudProviderPassword": expectedVSpherePassword,
"vsphereServer": datacenterConfig.Spec.Server,
"vsphereDatacenter": datacenterConfig.Spec.Datacenter,
"vsphereNetwork": datacenterConfig.Spec.Network,
"eksaLicense": "",
}
setupContext(t)
tpl, err := template.New("test").Funcs(sprig.TxtFuncMap()).Parse(defaultSecretObject)
if err != nil {
t.Fatalf("template create error: %v", err)
}
err = tpl.Execute(&bytes.Buffer{}, values)
if err != nil {
t.Fatalf("template execute error: %v", err)
}
err = provider.PostBootstrapSetup(ctx, clusterConfig, &cluster)
if err != nil {
t.Fatalf("BootstrapSetup error %v", err)
}
}
func TestProviderUpdateSecretSuccess(t *testing.T) {
ctx := context.Background()
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
clusterConfig := givenClusterConfig(t, testClusterConfigMainFilename)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider := newProviderWithKubectl(t, datacenterConfig, clusterConfig, kubectl, ipValidator)
cluster := types.Cluster{
Name: "test",
KubeconfigFile: "",
}
values := map[string]string{
"clusterName": clusterConfig.Name,
"vspherePassword": expectedVSphereUsername,
"vsphereUsername": expectedVSpherePassword,
"eksaCloudProviderUsername": expectedVSphereUsername,
"eksaCloudProviderPassword": expectedVSpherePassword,
"eksaLicense": "",
"eksaSystemNamespace": constants.EksaSystemNamespace,
}
setupContext(t)
kubectl.EXPECT().ApplyKubeSpecFromBytes(ctx, gomock.Any(), gomock.Any())
template, err := template.New("test").Funcs(sprig.TxtFuncMap()).Parse(defaultSecretObject)
if err != nil {
t.Fatalf("template create error: %v", err)
}
err = template.Execute(&bytes.Buffer{}, values)
if err != nil {
t.Fatalf("template execute error: %v", err)
}
err = provider.UpdateSecrets(ctx, &cluster, nil)
if err != nil {
t.Fatalf("UpdateSecrets error %v", err)
}
}
func TestSetupAndValidateCreateClusterNoServer(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.VSphereDatacenter.Spec.Server = ""
provider := givenProvider(t)
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "VSphereDatacenterConfig server is not set or is empty", err)
}
func TestSetupAndValidateCreateClusterInsecure(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.VSphereDatacenter.Spec.Insecure = true
provider := givenProvider(t)
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("Unexpected error <%v>", err)
}
if clusterSpec.VSphereDatacenter.Spec.Thumbprint != "" {
t.Fatalf("Expected=<> actual=<%s>", clusterSpec.VSphereDatacenter.Spec.Thumbprint)
}
}
func TestSetupAndValidateCreateClusterNoDatacenter(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.VSphereDatacenter.Spec.Datacenter = ""
provider := givenProvider(t)
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "VSphereDatacenterConfig datacenter is not set or is empty", err)
}
func TestSetupAndValidateCreateClusterNoNetwork(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.VSphereDatacenter.Spec.Network = ""
provider := givenProvider(t)
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "VSphereDatacenterConfig VM network is not set or is empty", err)
}
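// TestSetupAndValidateCreateClusterNotControlPlaneVMsMemoryMiB verifies that an unset (zero)
// control plane memoryMiB is defaulted to 8192 during create validation.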
func TestSetupAndValidateCreateClusterNotControlPlaneVMsMemoryMiB(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.MemoryMiB = 0
provider := givenProvider(t)
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("Unexpected error <%v>", err)
}
if clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.MemoryMiB != 8192 {
t.Fatalf("Expected=<8192> actual=<%d>", clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.MemoryMiB)
}
}
func TestSetupAndValidateCreateClusterNotControlPlaneVMsNumCPUs(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.NumCPUs = 0
provider := givenProvider(t)
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("Unexpected error <%v>", err)
}
if clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.NumCPUs != 2 {
t.Fatalf("Expected=<2> actual=<%d>", clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.NumCPUs)
}
}
func TestSetupAndValidateCreateClusterNotWorkloadVMsMemoryMiB(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.MemoryMiB = 0
provider := givenProvider(t)
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("Unexpected error <%v>", err)
}
if clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.MemoryMiB != 8192 {
t.Fatalf("Expected=<8192> actual=<%d>", clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.MemoryMiB)
}
}
func TestSetupAndValidateCreateClusterNotWorkloadVMsNumCPUs(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.NumCPUs = 0
provider := givenProvider(t)
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("Unexpected error <%v>", err)
}
if clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.NumCPUs != 2 {
t.Fatalf("Expected=<2> actual=<%d>", clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.NumCPUs)
}
}
func TestSetupAndValidateCreateClusterNotEtcdVMsMemoryMiB(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.MemoryMiB = 0
provider := givenProvider(t)
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("Unexpected error <%v>", err)
}
if clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.MemoryMiB != 8192 {
t.Fatalf("Expected=<8192> actual=<%d>", clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.MemoryMiB)
}
}
func TestSetupAndValidateCreateClusterNotEtcdVMsNumCPUs(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.NumCPUs = 0
provider := givenProvider(t)
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("Unexpected error <%v>", err)
}
if clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.NumCPUs != 2 {
t.Fatalf("Expected=<2> actual=<%d>", clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.NumCPUs)
}
}
func TestSetupAndValidateCreateClusterBogusIp(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host = "bogus"
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "cluster controlPlaneConfiguration.Endpoint.Host is invalid: bogus", err)
}
func TestSetupAndValidateCreateClusterUsedIp(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host = "0.0.0.0"
setupContext(t)
ipInUseError := "cluster controlPlaneConfiguration.Endpoint.Host <0.0.0.0> is already in use, please provide a unique IP"
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
	ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(errors.New(ipInUseError))
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, ipInUseError, err)
}
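// TestSetupAndValidateCreateClusterNoCloneModeDefaultToLinkedClone verifies that machine configs
// with an empty cloneMode are defaulted to linkedClone for this cluster spec.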
func TestSetupAndValidateCreateClusterNoCloneModeDefaultToLinkedClone(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
govc := NewDummyProviderGovcClient()
provider := newProviderWithGovc(t,
clusterSpec.VSphereDatacenter,
clusterSpec.Cluster,
govc,
)
provider.providerGovcClient = govc
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.CloneMode = ""
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.CloneMode = ""
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.CloneMode = ""
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assert.NoError(t, err, "No error expected for provider.SetupAndValidateCreateCluster()")
for _, m := range clusterSpec.VSphereMachineConfigs {
		assert.Equal(t, v1alpha1.LinkedClone, m.Spec.CloneMode)
}
}
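// TestSetupAndValidateCreateClusterNoCloneModeDefaultToFullClone verifies that machine configs
// with an empty cloneMode but a customized diskGiB (100 here) are defaulted to fullClone.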
func TestSetupAndValidateCreateClusterNoCloneModeDefaultToFullClone(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
govc := NewDummyProviderGovcClient()
provider := newProviderWithGovc(t,
clusterSpec.VSphereDatacenter,
clusterSpec.Cluster,
govc,
)
provider.providerGovcClient = govc
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.CloneMode = ""
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.DiskGiB = 100
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.CloneMode = ""
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.DiskGiB = 100
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.CloneMode = ""
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.DiskGiB = 100
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assert.NoError(t, err, "No error expected for provider.SetupAndValidateCreateCluster()")
for _, m := range clusterSpec.VSphereMachineConfigs {
		assert.Equal(t, v1alpha1.FullClone, m.Spec.CloneMode)
}
}
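// TestSetupAndValidateCreateClusterFullCloneDiskGiBLessThan20TemplateDiskSize25 verifies that a
// fullClone diskGiB configured below 20 (10 here) is raised to the template's 25 GiB disk size.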
func TestSetupAndValidateCreateClusterFullCloneDiskGiBLessThan20TemplateDiskSize25(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
govc := NewDummyProviderGovcClient()
provider := newProviderWithGovc(t,
clusterSpec.VSphereDatacenter,
clusterSpec.Cluster,
govc,
)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.CloneMode = v1alpha1.FullClone
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.DiskGiB = 10
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.CloneMode = v1alpha1.FullClone
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.DiskGiB = 10
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.CloneMode = v1alpha1.FullClone
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.DiskGiB = 10
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assert.NoError(t, err, "No error expected for provider.SetupAndValidateCreateCluster()")
for _, m := range clusterSpec.VSphereMachineConfigs {
		assert.Equalf(t, 25, m.Spec.DiskGiB, "DiskGiB mismatch for VSphereMachineConfig %s", m.Name)
}
}
func TestSetupAndValidateCreateClusterFullCloneDiskGiBLessThan20TemplateDiskSize20(t *testing.T) {
setupContext(t)
ctrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(ctrl)
govc := mocks.NewMockProviderGovcClient(ctrl)
vscb, _ := newMockVSphereClientBuilder(ctrl)
ipValidator := mocks.NewMockIPValidator(ctrl)
spec := givenClusterSpec(t, testClusterConfigMain121CPOnlyFilename)
tt := &providerTest{
t: t,
WithT: NewWithT(t),
ctx: context.Background(),
managementCluster: &types.Cluster{
Name: "m-cluster",
KubeconfigFile: "kubeconfig-m.kubeconfig",
},
workloadCluster: &types.Cluster{
Name: "test",
KubeconfigFile: "kubeconfig-w.kubeconfig",
},
cluster: spec.Cluster,
clusterSpec: spec,
datacenterConfig: spec.VSphereDatacenter,
machineConfigs: spec.VSphereMachineConfigs,
kubectl: kubectl,
govc: govc,
clientBuilder: vscb,
ipValidator: ipValidator,
}
tt.buildNewProvider()
controlPlaneMachineConfigName := tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
tt.clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.CloneMode = v1alpha1.FullClone
tt.clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.DiskGiB = 10
template := tt.clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Template
tags := []string{eksd121ReleaseTag, ubuntuOSTag}
tt.setExpectationForSetup()
tt.setExpectationForVCenterValidation()
tt.setExpectationsForMachineConfigsVCenterValidation()
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, template).Return(template, nil)
tt.govc.EXPECT().GetVMDiskSizeInGB(tt.ctx, template, tt.clusterSpec.VSphereDatacenter.Spec.Datacenter)
tt.govc.EXPECT().TemplateHasSnapshot(tt.ctx, template).Return(false, nil)
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, template).Return(template, nil)
tt.govc.EXPECT().GetTags(tt.ctx, template).Return(tags, nil)
tt.govc.EXPECT().ListTags(tt.ctx)
tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, tt.clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Datastore).Return(100.0, nil)
tt.ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(tt.cluster)
err := tt.provider.SetupAndValidateCreateCluster(context.Background(), tt.clusterSpec)
assert.NoError(t, err, "No error expected for provider.SetupAndValidateCreateCluster()")
for _, m := range tt.clusterSpec.VSphereMachineConfigs {
		assert.Equalf(t, 20, m.Spec.DiskGiB, "DiskGiB mismatch for VSphereMachineConfig %s", m.Name)
}
}
func TestSetupAndValidateCreateClusterLinkedCloneErrorDiskSize(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
govc := NewDummyProviderGovcClient()
provider := newProviderWithGovc(t,
clusterSpec.VSphereDatacenter,
clusterSpec.Cluster,
govc,
)
provider.providerGovcClient = govc
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.DiskGiB = 100
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assert.ErrorContains(t, err, fmt.Sprintf(
"diskGiB cannot be customized for VSphereMachineConfig '%s' when using 'linkedClone'; change the cloneMode to 'fullClone' or the diskGiB to match the template's (%s) disk size of 25 GiB",
controlPlaneMachineConfigName, clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Template,
))
}
func TestSetupAndValidateCreateClusterLinkedCloneErrorNoSnapshots(t *testing.T) {
tt := newProviderTest(t)
controlPlaneMachineConfigName := tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
tt.setExpectationForSetup()
tt.setExpectationForVCenterValidation()
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, tt.clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Template).Return(tt.clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Template, nil)
tt.govc.EXPECT().GetVMDiskSizeInGB(tt.ctx, tt.clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Template, tt.clusterSpec.VSphereDatacenter.Spec.Datacenter)
tt.govc.EXPECT().TemplateHasSnapshot(tt.ctx, tt.clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Template).Return(false, nil)
err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec)
assert.Regexp(t,
"cannot use 'linkedClone' for VSphereMachineConfig '.*' because its template (.*) has no snapshots; create snapshots or change the cloneMode to 'fullClone",
err.Error(),
)
}
func TestSetupAndValidateCreateClusterInvalidCloneMode(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
govc := NewDummyProviderGovcClient()
provider := newProviderWithGovc(t,
clusterSpec.VSphereDatacenter,
clusterSpec.Cluster,
govc,
)
provider.providerGovcClient = govc
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
invalidClone := "invalidClone"
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.CloneMode = v1alpha1.CloneMode(invalidClone)
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
assert.ErrorContains(t, err,
fmt.Sprintf(
"cloneMode %s is not supported for VSphereMachineConfig %s. Supported clone modes: [%s, %s]",
invalidClone,
controlPlaneMachineConfigName,
v1alpha1.LinkedClone,
v1alpha1.FullClone,
),
)
}
func TestSetupAndValidateCreateClusterDatastoreUsageError(t *testing.T) {
tt := newProviderTest(t)
tt.setExpectationForSetup()
tt.setExpectationForVCenterValidation()
tt.setExpectationsForDefaultDiskAndCloneModeGovcCalls()
tt.setExpectationsForMachineConfigsVCenterValidation()
cpMachineConfig := tt.machineConfigs[tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
for _, mc := range tt.machineConfigs {
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, mc.Spec.Template).Return(mc.Spec.Template, nil).AnyTimes()
}
tt.govc.EXPECT().GetTags(tt.ctx, cpMachineConfig.Spec.Template).Return([]string{eksd119ReleaseTag, ubuntuOSTag}, nil)
tt.govc.EXPECT().ListTags(tt.ctx)
tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, cpMachineConfig.Spec.Datastore).Return(0.0, fmt.Errorf("error"))
err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec)
thenErrorExpected(t, "validating vsphere machine configs datastore usage: getting datastore details: error", err)
}
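// TestSetupAndValidateSSHAuthorizedKeyEmptyCP verifies that an empty sshAuthorizedKeys entry on the
// control plane machine config triggers generation of a new key during create validation.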
func TestSetupAndValidateSSHAuthorizedKeyEmptyCP(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
if clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey has not changed for control plane machine")
}
}
func TestSetupAndValidateSSHAuthorizedKeyEmptyErrorGenerating(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
g := NewWithT(t)
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
ctrl := gomock.NewController(t)
writer := mockswriter.NewMockFileWriter(ctrl)
provider.writer = writer
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
writer.EXPECT().Write(
test.OfType("string"), gomock.Any(), gomock.Not(gomock.Nil()),
).Return("", errors.New("writing file"))
setupContext(t)
g.Expect(
provider.SetupAndValidateCreateCluster(ctx, clusterSpec),
).To(MatchError(ContainSubstring(
"failed setup and validations: generating ssh key pair: writing private key: writing file",
)))
}
func TestSetupAndValidateSSHAuthorizedKeyEmptyWorker(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
if clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey has not changed for worker node machine")
}
}
func TestSetupAndValidateSSHAuthorizedKeyEmptyEtcd(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
if clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey did not get generated for etcd machine")
}
}
func TestSetupAndValidateSSHAuthorizedKeyEmptyAllMachineConfigs(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = ""
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
if clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey has not changed for control plane machine")
}
if clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey has not changed for worker node machine")
}
if clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" {
t.Fatalf("sshAuthorizedKey not generated for etcd machines")
}
if clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] != clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] {
t.Fatalf("sshAuthorizedKey not the same for controlplane and worker machines")
}
if clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] != clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] {
t.Fatalf("sshAuthorizedKey not the same for controlplane and etcd machines")
}
}
func TestSetupAndValidateUsersNil(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users = nil
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.Users = nil
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.Users = nil
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
}
func TestSetupAndValidateSshAuthorizedKeysNil(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys = nil
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys = nil
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys = nil
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
}
func TestSetupAndValidateCreateClusterCPMachineGroupRefNonexistent(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "nonexistent"
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "cannot find VSphereMachineConfig nonexistent for control plane", err)
}
func TestSetupAndValidateCreateClusterWorkerMachineGroupRefNonexistent(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name = "nonexistent"
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "cannot find VSphereMachineConfig nonexistent for worker nodes", err)
}
func TestSetupAndValidateCreateClusterEtcdMachineGroupRefNonexistent(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name = "nonexistent"
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "cannot find VSphereMachineConfig nonexistent for etcd machines", err)
}
func TestSetupAndValidateCreateClusterOsFamilyDifferent(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.OSFamily = "bottlerocket"
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].Name = "ec2-user"
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "all VSphereMachineConfigs must have the same osFamily specified", err)
}
func TestSetupAndValidateCreateClusterOsFamilyDifferentForEtcd(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.OSFamily = "bottlerocket"
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.Users[0].Name = "ec2-user"
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "all VSphereMachineConfigs must have the same osFamily specified", err)
}
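// TestSetupAndValidateCreateClusterOsFamilyEmpty verifies that an empty osFamily is defaulted from
// the template's OS tag (bottlerocket here) during create validation.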
func TestSetupAndValidateCreateClusterOsFamilyEmpty(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
govc := NewDummyProviderGovcClient()
govc.osTag = bottlerocketOSTag
provider := newProviderWithGovc(t,
clusterSpec.VSphereDatacenter,
clusterSpec.Cluster,
govc,
)
provider.providerGovcClient = govc
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.OSFamily = ""
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].Name = ""
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.OSFamily = ""
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].Name = ""
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.OSFamily = ""
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.Users[0].Name = ""
setupContext(t)
mockCtrl := gomock.NewController(t)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err)
}
if clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.OSFamily != v1alpha1.Bottlerocket {
t.Fatalf("got osFamily for control plane machine as %v, want %v", clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.OSFamily, v1alpha1.Bottlerocket)
}
	if clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.OSFamily != v1alpha1.Bottlerocket {
		t.Fatalf("got osFamily for worker node machine as %v, want %v", clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.OSFamily, v1alpha1.Bottlerocket)
	}
if clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.OSFamily != v1alpha1.Bottlerocket {
t.Fatalf("got osFamily for etcd machine as %v, want %v", clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.OSFamily, v1alpha1.Bottlerocket)
}
}
func TestSetupAndValidateCreateClusterTemplateDifferent(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
provider := givenProvider(t)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Template = "test"
setupContext(t)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
thenErrorExpected(t, "all VSphereMachineConfigs must have the same template specified", err)
}
func TestSetupAndValidateCreateClusterTemplateDoesNotExist(t *testing.T) {
tt := newProviderTest(t)
tt.setExpectationForSetup()
tt.setExpectationForVCenterValidation()
for _, mc := range tt.machineConfigs {
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, mc.Spec.Template).Return("", nil).MaxTimes(1)
}
err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec)
thenErrorExpected(t, "failed setting default values for vsphere machine configs: template <"+testTemplate+"> not found", err)
}
func TestSetupAndValidateCreateClusterErrorCheckingTemplate(t *testing.T) {
tt := newProviderTest(t)
errorMessage := "failed getting template"
tt.setExpectationForSetup()
tt.setExpectationForVCenterValidation()
for _, mc := range tt.machineConfigs {
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, mc.Spec.Template).Return("", errors.New(errorMessage)).MaxTimes(1)
}
err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec)
thenErrorExpected(t, "failed setting default values for vsphere machine configs: setting template full path: "+errorMessage, err)
}
func TestSetupAndValidateCreateClusterTemplateMissingTags(t *testing.T) {
tt := newProviderTest(t)
tt.setExpectationForSetup()
tt.setExpectationsForDefaultDiskAndCloneModeGovcCalls()
tt.setExpectationForVCenterValidation()
tt.setExpectationsForMachineConfigsVCenterValidation()
for _, mc := range tt.machineConfigs {
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, mc.Spec.Template).Return(mc.Spec.Template, nil)
}
controlPlaneMachineConfigName := tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
controlPlaneMachineConfig := tt.machineConfigs[controlPlaneMachineConfigName]
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, controlPlaneMachineConfig.Spec.Template).Return(controlPlaneMachineConfig.Spec.Template, nil)
tt.govc.EXPECT().GetTags(tt.ctx, controlPlaneMachineConfig.Spec.Template).Return(nil, nil)
err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec)
thenErrorPrefixExpected(t, "template "+testTemplate+" is missing tag ", err)
}
func TestSetupAndValidateCreateClusterErrorGettingTags(t *testing.T) {
tt := newProviderTest(t)
errorMessage := "failed getting tags"
controlPlaneMachineConfigName := tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
controlPlaneMachineConfig := tt.clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName]
tt.setExpectationForSetup()
tt.setExpectationsForDefaultDiskAndCloneModeGovcCalls()
tt.setExpectationForVCenterValidation()
tt.setExpectationsForMachineConfigsVCenterValidation()
for _, mc := range tt.machineConfigs {
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, mc.Spec.Template).Return(mc.Spec.Template, nil)
}
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, controlPlaneMachineConfig.Spec.Template).Return(controlPlaneMachineConfig.Spec.Template, nil)
tt.govc.EXPECT().GetTags(tt.ctx, controlPlaneMachineConfig.Spec.Template).Return(nil, errors.New(errorMessage))
err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec)
thenErrorExpected(t, "validating template tags: failed getting tags", err)
}
func TestSetupAndValidateCreateClusterDefaultTemplate(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.VersionsBundle.EksD.Ova.Bottlerocket.URI = "https://amazonaws.com/artifacts/0.0.1/eks-distro/ova/1-19/1-19-4/bottlerocket-eks-a-0.0.1.build.38-amd64.ova"
clusterSpec.VersionsBundle.EksD.Ova.Bottlerocket.SHA256 = "63a8dce1683379cb8df7d15e9c5adf9462a2b9803a544dd79b16f19a4657967f"
clusterSpec.VersionsBundle.EksD.Ova.Bottlerocket.Arch = []string{"amd64"}
clusterSpec.VersionsBundle.EksD.Name = eksd119Release
clusterSpec.VersionsBundle.EksD.KubeVersion = "v1.19.8"
clusterSpec.VersionsBundle.KubeVersion = "1.19"
clusterSpec.Cluster.Namespace = "test-namespace"
provider := givenProvider(t)
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.Template = ""
workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[workerNodeMachineConfigName].Spec.Template = ""
etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
clusterSpec.VSphereMachineConfigs[etcdMachineConfigName].Spec.Template = ""
wantError := fmt.Errorf("failed setting default values for vsphere machine configs: can not import ova for osFamily: ubuntu, please use bottlerocket as osFamily for auto-importing or provide a valid template")
setupContext(t)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err == nil || err.Error() != wantError.Error() {
t.Fatalf("provider.SetupAndValidateCreateCluster() err = %v, want err = %v", err, wantError)
}
}
func TestGetInfrastructureBundleSuccess(t *testing.T) {
tests := []struct {
testName string
clusterSpec *cluster.Spec
}{
{
testName: "correct Overrides layer",
clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.VSphere = releasev1alpha1.VSphereBundle{
Version: "v0.7.8",
ClusterAPIController: releasev1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v0.7.8-35f54b0a7ff0f4f3cb0b8e30a0650acd0e55496a",
},
Manager: releasev1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.18.1-2093eaeda5a4567f0e516d652e0b25b1d7abc774",
},
KubeVip: releasev1alpha1.Image{
URI: "public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774",
},
Metadata: releasev1alpha1.Manifest{
URI: "Metadata.yaml",
},
Components: releasev1alpha1.Manifest{
URI: "Components.yaml",
},
ClusterTemplate: releasev1alpha1.Manifest{
URI: "ClusterTemplate.yaml",
},
}
}),
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
p := givenProvider(t)
infraBundle := p.GetInfrastructureBundle(tt.clusterSpec)
if infraBundle == nil {
t.Fatalf("provider.GetInfrastructureBundle() should have an infrastructure bundle")
}
assert.Equal(t, "infrastructure-vsphere/v0.7.8/", infraBundle.FolderName, "Incorrect folder name")
assert.Equal(t, len(infraBundle.Manifests), 3, "Wrong number of files in the infrastructure bundle")
wantManifests := []releasev1alpha1.Manifest{
tt.clusterSpec.VersionsBundle.VSphere.Components,
tt.clusterSpec.VersionsBundle.VSphere.Metadata,
tt.clusterSpec.VersionsBundle.VSphere.ClusterTemplate,
}
assert.ElementsMatch(t, infraBundle.Manifests, wantManifests, "Incorrect manifests")
})
}
}
func TestGetDatacenterConfig(t *testing.T) {
tt := newProviderTest(t)
providerConfig := tt.provider.DatacenterConfig(tt.clusterSpec)
tt.Expect(providerConfig).To(BeAssignableToTypeOf(&v1alpha1.VSphereDatacenterConfig{}))
d := providerConfig.(*v1alpha1.VSphereDatacenterConfig)
tt.Expect(d).To(Equal(tt.clusterSpec.VSphereDatacenter))
}
func TestValidateNewSpecSuccess(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
setupContext(t)
provider := givenProvider(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
clusterVsphereSecret := &v1.Secret{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Data: map[string][]byte{
"username": []byte("vsphere_username"),
"password": []byte("vsphere_password"),
},
}
kubectl.EXPECT().GetEksaCluster(context.TODO(), gomock.Any(), gomock.Any()).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(context.TODO(), clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereDatacenter, nil)
for _, config := range clusterSpec.VSphereMachineConfigs {
kubectl.EXPECT().GetEksaVSphereMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), clusterSpec.Cluster.Namespace).Return(config, nil)
}
kubectl.EXPECT().GetSecretFromNamespace(gomock.Any(), gomock.Any(), CredentialsObjectName, gomock.Any()).Return(clusterVsphereSecret, nil)
err := provider.ValidateNewSpec(context.TODO(), &types.Cluster{}, newClusterSpec)
assert.NoError(t, err, "No error should be returned when previous spec == new spec")
}
func TestValidateNewSpecMutableFields(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
setupContext(t)
provider := givenProvider(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
for _, config := range newClusterSpec.VSphereMachineConfigs {
config.Spec.ResourcePool = "new-" + config.Spec.ResourcePool
config.Spec.Folder = "new=" + config.Spec.Folder
}
clusterVsphereSecret := &v1.Secret{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Data: map[string][]byte{
"username": []byte("vsphere_username"),
"password": []byte("vsphere_password"),
},
}
kubectl.EXPECT().GetEksaCluster(context.TODO(), gomock.Any(), gomock.Any()).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(context.TODO(), clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), gomock.Any()).Return(clusterSpec.VSphereDatacenter, nil)
for _, config := range clusterSpec.VSphereMachineConfigs {
kubectl.EXPECT().GetEksaVSphereMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), clusterSpec.Cluster.Namespace).Return(config, nil)
}
kubectl.EXPECT().GetSecretFromNamespace(gomock.Any(), gomock.Any(), CredentialsObjectName, gomock.Any()).Return(clusterVsphereSecret, nil)
err := provider.ValidateNewSpec(context.TODO(), &types.Cluster{}, newClusterSpec)
assert.NoError(t, err, "No error should be returned when modifying mutable fields")
}
func TestValidateNewSpecDatacenterImmutable(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
provider := givenProvider(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
clusterSpec.VSphereDatacenter.Spec.Datacenter = "new-" + clusterSpec.VSphereDatacenter.Spec.Datacenter
kubectl.EXPECT().GetEksaCluster(context.TODO(), gomock.Any(), gomock.Any()).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(context.TODO(), clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereDatacenter, nil)
for _, config := range clusterSpec.VSphereMachineConfigs {
kubectl.EXPECT().GetEksaVSphereMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), clusterSpec.Cluster.Namespace).Return(config, nil)
}
err := provider.ValidateNewSpec(context.TODO(), &types.Cluster{}, newClusterSpec)
assert.Error(t, err, "Datacenter should be immutable")
}
func TestValidateNewSpecMachineConfigNotFound(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
provider := givenProvider(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
newClusterSpec.VSphereDatacenter.Spec.Datacenter = "new-" + newClusterSpec.VSphereDatacenter.Spec.Datacenter
newClusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "missing-machine-group"
newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name = "missing-machine-group"
newClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name = "missing-machine-group"
kubectl.EXPECT().GetEksaCluster(context.TODO(), gomock.Any(), gomock.Any()).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(context.TODO(), clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereDatacenter, nil)
err := provider.ValidateNewSpec(context.TODO(), &types.Cluster{}, newClusterSpec)
assert.Errorf(t, err, "can't find machine config missing-machine-group in vsphere provider machine configs")
}
func TestValidateNewSpecServerImmutable(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
provider := givenProvider(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
newClusterSpec.VSphereDatacenter.Spec.Server = "new-" + newClusterSpec.VSphereDatacenter.Spec.Server
kubectl.EXPECT().GetEksaCluster(context.TODO(), gomock.Any(), gomock.Any()).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(context.TODO(), clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereDatacenter, nil)
for _, config := range clusterSpec.VSphereMachineConfigs {
kubectl.EXPECT().GetEksaVSphereMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), clusterSpec.Cluster.Namespace).Return(config, nil)
}
err := provider.ValidateNewSpec(context.TODO(), &types.Cluster{}, newClusterSpec)
assert.Error(t, err, "Server should be immutable")
}
func TestValidateNewSpecStoragePolicyNameImmutableControlPlane(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
provider := givenProvider(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
controlPlaneMachineConfigName := newClusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
controlPlaneMachineConfig := newClusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName]
controlPlaneMachineConfig.Spec.StoragePolicyName = "new-" + controlPlaneMachineConfig.Spec.StoragePolicyName
kubectl.EXPECT().GetEksaCluster(context.TODO(), gomock.Any(), gomock.Any()).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(context.TODO(), clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereDatacenter, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName], nil).AnyTimes()
err := provider.ValidateNewSpec(context.TODO(), &types.Cluster{}, newClusterSpec)
assert.ErrorContains(t, err, "spec.storagePolicyName is immutable", "StoragePolicyName should be immutable")
}
func TestValidateNewSpecStoragePolicyNameImmutableWorker(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
provider := givenProvider(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
workerMachineConfigName := newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
workerMachineConfig := newClusterSpec.VSphereMachineConfigs[workerMachineConfigName]
workerMachineConfig.Spec.StoragePolicyName = "new-" + workerMachineConfig.Spec.StoragePolicyName
kubectl.EXPECT().GetEksaCluster(context.TODO(), gomock.Any(), gomock.Any()).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(context.TODO(), clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereDatacenter, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereMachineConfigs[workerMachineConfigName], nil).AnyTimes()
err := provider.ValidateNewSpec(context.TODO(), &types.Cluster{}, newClusterSpec)
assert.ErrorContains(t, err, "spec.storagePolicyName is immutable", "StoragePolicyName should be immutable")
}
func TestValidateNewSpecOSFamilyImmutableControlPlane(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
provider := givenProvider(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
newClusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.OSFamily = "bottlerocket"
kubectl.EXPECT().GetEksaCluster(context.TODO(), gomock.Any(), gomock.Any()).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(context.TODO(), clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereDatacenter, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName], nil).AnyTimes()
err := provider.ValidateNewSpec(context.TODO(), &types.Cluster{}, newClusterSpec)
assert.ErrorContains(t, err, "spec.osFamily is immutable", "OSFamily should be immutable")
}
func TestValidateNewSpecOSFamilyImmutableWorker(t *testing.T) {
mockCtrl := gomock.NewController(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
provider := givenProvider(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
workerMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
newClusterSpec.VSphereMachineConfigs[workerMachineConfigName].Spec.OSFamily = "bottlerocket"
kubectl.EXPECT().GetEksaCluster(context.TODO(), gomock.Any(), gomock.Any()).Return(clusterSpec.Cluster, nil)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(context.TODO(), clusterSpec.Cluster.Spec.DatacenterRef.Name, gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereDatacenter, nil)
kubectl.EXPECT().GetEksaVSphereMachineConfig(context.TODO(), gomock.Any(), gomock.Any(), clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereMachineConfigs[workerMachineConfigName], nil).AnyTimes()
err := provider.ValidateNewSpec(context.TODO(), &types.Cluster{}, newClusterSpec)
assert.ErrorContains(t, err, "spec.osFamily is immutable", "OSFamily should be immutable")
}
func TestChangeDiffNoChange(t *testing.T) {
provider := givenProvider(t)
clusterSpec := givenEmptyClusterSpec()
assert.Nil(t, provider.ChangeDiff(clusterSpec, clusterSpec))
}
func TestChangeDiffWithChange(t *testing.T) {
provider := givenProvider(t)
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.VSphere.Version = "v0.3.18"
})
newClusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.VSphere.Version = "v0.3.19"
})
wantDiff := &types.ComponentChangeDiff{
ComponentName: "vsphere",
NewVersion: "v0.3.19",
OldVersion: "v0.3.18",
}
assert.Equal(t, wantDiff, provider.ChangeDiff(clusterSpec, newClusterSpec))
}
func TestVsphereProviderRunPostControlPlaneUpgrade(t *testing.T) {
tt := newProviderTest(t)
tt.Expect(tt.provider.RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, tt.workloadCluster, tt.managementCluster)).To(Succeed())
}
func TestProviderUpgradeNeeded(t *testing.T) {
testCases := []struct {
testName string
newManager, oldManager string
newKubeVip, oldKubeVip string
want bool
}{
{
testName: "different manager",
newManager: "a", oldManager: "b",
want: true,
},
{
testName: "different kubevip",
newKubeVip: "a", oldKubeVip: "b",
want: true,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
provider := givenProvider(t)
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.VSphere.Manager.ImageDigest = tt.oldManager
s.VersionsBundle.VSphere.KubeVip.ImageDigest = tt.oldKubeVip
})
newClusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.VSphere.Manager.ImageDigest = tt.newManager
s.VersionsBundle.VSphere.KubeVip.ImageDigest = tt.newKubeVip
})
g := NewWithT(t)
g.Expect(provider.UpgradeNeeded(context.Background(), clusterSpec, newClusterSpec, nil)).To(Equal(tt.want))
})
}
}
func TestProviderGenerateCAPISpecForCreateWithPodIAMConfig(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.Cluster.Spec.PodIAMConfig = &v1alpha1.PodIAMConfig{ServiceAccountIssuer: "https://test"}
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_pod_iam_config.yaml")
}
func TestProviderGenerateCAPISpecForCreateWithCustomResolvConf(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf = &v1alpha1.ResolvConf{Path: "/etc/my-custom-resolv.conf"}
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_custom_resolv_conf.yaml")
}
func TestProviderGenerateCAPISpecForCreateVersion121(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
clusterSpec := givenClusterSpec(t, testClusterConfigMain121Filename)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMain121Filename)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_main_121_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_main_121_md.yaml")
}
func TestSetupAndValidateCreateManagementDoesNotCheckIfMachineAndDataCenterExist(t *testing.T) {
ctx := context.Background()
provider := givenProvider(t)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
setupContext(t)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider.ipValidator = ipValidator
for _, config := range clusterSpec.VSphereMachineConfigs {
kubectl.EXPECT().SearchVsphereMachineConfig(context.TODO(), config.Name, gomock.Any(), config.Namespace).Return([]*v1alpha1.VSphereMachineConfig{}, nil).Times(0)
}
kubectl.EXPECT().SearchVsphereDatacenterConfig(context.TODO(), datacenterConfig.Name, gomock.Any(), clusterSpec.Cluster.Namespace).Return([]*v1alpha1.VSphereDatacenterConfig{}, nil).Times(0)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
assert.NoError(t, err, "No error should be returned")
}
func TestClusterSpecChangedNoChanges(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
cluster := &types.Cluster{
KubeconfigFile: "test",
}
dcConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
for _, value := range clusterSpec.VSphereMachineConfigs {
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, value.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(value, nil)
}
ipValidator := mocks.NewMockIPValidator(mockCtrl)
provider := newProviderWithKubectl(t, dcConfig, clusterSpec.Cluster, kubectl, ipValidator)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(ctx, clusterSpec.Cluster.Spec.DatacenterRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(dcConfig, nil)
specChanged, err := provider.UpgradeNeeded(ctx, clusterSpec, clusterSpec, cluster)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
if specChanged {
t.Fatalf("expected no spec change to be detected")
}
}
func TestClusterSpecChangedDatacenterConfigChanged(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
newClusterSpec := clusterSpec.DeepCopy()
cluster := &types.Cluster{
KubeconfigFile: "test",
}
dcConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
newClusterSpec.VSphereDatacenter.Spec.Datacenter = "shiny-new-api-datacenter"
provider := newProviderWithKubectl(t, dcConfig, clusterSpec.Cluster, kubectl, ipValidator)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(ctx, clusterSpec.Cluster.Spec.DatacenterRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(clusterSpec.VSphereDatacenter, nil)
specChanged, err := provider.UpgradeNeeded(ctx, newClusterSpec, clusterSpec, cluster)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
if !specChanged {
t.Fatalf("expected spec change but none was detected")
}
}
func TestClusterSpecChangedMachineConfigsChanged(t *testing.T) {
ctx := context.Background()
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
cluster := &types.Cluster{
KubeconfigFile: "test",
}
dcConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
modifiedMachineConfig := clusterSpec.VSphereMachineConfigs[clusterSpec.Cluster.MachineConfigRefs()[0].Name].DeepCopy()
modifiedMachineConfig.Spec.NumCPUs = 4
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(modifiedMachineConfig, nil)
provider := newProviderWithKubectl(t, dcConfig, clusterSpec.Cluster, kubectl, ipValidator)
kubectl.EXPECT().GetEksaVSphereDatacenterConfig(ctx, clusterSpec.Cluster.Spec.DatacenterRef.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace).Return(dcConfig, nil)
specChanged, err := provider.UpgradeNeeded(ctx, clusterSpec, clusterSpec, cluster)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
if !specChanged {
t.Fatalf("expected spec change but none was detected")
}
}
func TestValidateMachineConfigsDatastoreUsageCreateSuccess(t *testing.T) {
tt := newProviderTest(t)
machineConfigs := tt.clusterSpec.VSphereMachineConfigs
machineConfigs[tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec.Datastore = "test-datastore"
for _, config := range machineConfigs {
tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, config.Spec.Datastore).Return(200.0, nil)
}
vSpec := NewSpec(tt.clusterSpec)
err := tt.provider.validateDatastoreUsageForCreate(tt.ctx, vSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestValidateMachineConfigsDatastoreUsageCreateError(t *testing.T) {
tt := newProviderTest(t)
machineConfigs := tt.clusterSpec.VSphereMachineConfigs
for _, config := range machineConfigs {
tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, config.Spec.Datastore).Return(50.0, nil)
}
vSpec := NewSpec(tt.clusterSpec)
err := tt.provider.validateDatastoreUsageForCreate(tt.ctx, vSpec)
thenErrorExpected(t, fmt.Sprintf("not enough space in datastore %s for given diskGiB and count for respective machine groups", tt.clusterSpec.VSphereMachineConfigs[tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name].Spec.Datastore), err)
}
func TestValidateMachineConfigsDatastoreUsageUpgradeError(t *testing.T) {
tt := newProviderTest(t)
cluster := &types.Cluster{
Name: "test",
}
tt.kubectl.EXPECT().GetEksaCluster(tt.ctx, cluster, tt.clusterSpec.Cluster.GetName()).Return(tt.clusterSpec.Cluster.DeepCopy(), nil)
machineConfigs := tt.clusterSpec.VSphereMachineConfigs
for _, config := range machineConfigs {
tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).AnyTimes()
tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, config.Spec.Datastore).Return(50.0, nil)
}
vSpec := NewSpec(tt.clusterSpec)
err := tt.provider.validateDatastoreUsageForUpgrade(tt.ctx, vSpec, cluster)
thenErrorExpected(t, fmt.Sprintf("not enough space in datastore %s for given diskGiB and count for respective machine groups", tt.clusterSpec.VSphereMachineConfigs[tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name].Spec.Datastore), err)
}
func TestValidateMachineConfigsNameUniquenessSuccess(t *testing.T) {
tt := newProviderTest(t)
cluster := &types.Cluster{
Name: "test",
}
prevSpec := tt.clusterSpec.DeepCopy()
prevSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "prev-test-cp"
prevSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name = "prev-test-etcd"
tt.kubectl.EXPECT().GetEksaCluster(tt.ctx, cluster, tt.clusterSpec.Cluster.Name).Return(prevSpec.Cluster, nil)
machineConfigs := tt.clusterSpec.VSphereMachineConfigs
for _, config := range machineConfigs {
tt.kubectl.EXPECT().SearchVsphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.VSphereMachineConfig{}, nil).AnyTimes()
}
err := tt.provider.validateMachineConfigsNameUniqueness(tt.ctx, cluster, tt.clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
}
}
func TestValidateMachineConfigsNameUniquenessError(t *testing.T) {
tt := newProviderTest(t)
cluster := &types.Cluster{
Name: "test",
}
prevSpec := tt.clusterSpec.DeepCopy()
prevSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "prev-test-cp"
prevSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name = "prev-test-etcd"
dummyVsphereMachineConfig := &v1alpha1.VSphereMachineConfig{
Spec: v1alpha1.VSphereMachineConfigSpec{
Users: []v1alpha1.UserConfiguration{{Name: "ec2-user"}},
},
}
tt.kubectl.EXPECT().GetEksaCluster(tt.ctx, cluster, tt.clusterSpec.Cluster.Name).Return(prevSpec.Cluster, nil)
machineConfigs := tt.clusterSpec.VSphereMachineConfigs
for _, config := range machineConfigs {
tt.kubectl.EXPECT().SearchVsphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).Return([]*v1alpha1.VSphereMachineConfig{dummyVsphereMachineConfig}, nil).AnyTimes()
}
err := tt.provider.validateMachineConfigsNameUniqueness(tt.ctx, cluster, tt.clusterSpec)
thenErrorExpected(t, fmt.Sprintf("control plane VSphereMachineConfig %s already exists", tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name), err)
}
func TestProviderGenerateCAPISpecForCreateCloudProviderCredentials(t *testing.T) {
tests := []struct {
testName string
wantCPFile string
envMap map[string]string
}{
{
testName: "specify cloud provider credentials",
wantCPFile: "testdata/expected_results_main_cp_cloud_provider_credentials.yaml",
envMap: map[string]string{config.EksavSphereCPUsernameKey: "EksavSphereCPUsername", config.EksavSphereCPPasswordKey: "EksavSphereCPPassword"},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
setupContext(t)
previousValues := map[string]string{}
for k, v := range tt.envMap {
previousValues[k] = os.Getenv(k)
if err := os.Setenv(k, v); err != nil {
t.Fatalf(err.Error())
}
}
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
cluster := &types.Cluster{
Name: "test",
}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, ipValidator)
if provider == nil {
t.Fatalf("provider object is nil")
}
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
if err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), tt.wantCPFile)
for k, v := range previousValues {
if err := os.Setenv(k, v); err != nil {
t.Fatalf(err.Error())
}
}
})
}
}
func TestVsphereProviderMachineConfigsSelfManagedCluster(t *testing.T) {
tt := newProviderTest(t)
machineConfigs := tt.provider.MachineConfigs(tt.clusterSpec)
tt.Expect(machineConfigs).To(HaveLen(3))
for _, m := range machineConfigs {
tt.Expect(m).To(BeAssignableToTypeOf(&v1alpha1.VSphereMachineConfig{}))
machineConfig := m.(*v1alpha1.VSphereMachineConfig)
tt.Expect(machineConfig.IsManaged()).To(BeFalse())
if machineConfig.Name == tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name {
tt.Expect(machineConfig.IsControlPlane()).To(BeTrue())
}
if machineConfig.Name == tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name {
tt.Expect(machineConfig.IsEtcd()).To(BeTrue())
}
}
}
func TestVsphereProviderMachineConfigsManagedCluster(t *testing.T) {
tt := newProviderTest(t)
tt.clusterSpec.Cluster.SetManagedBy("my-management-cluster")
machineConfigs := tt.provider.MachineConfigs(tt.clusterSpec)
tt.Expect(machineConfigs).To(HaveLen(3))
for _, m := range machineConfigs {
tt.Expect(m).To(BeAssignableToTypeOf(&v1alpha1.VSphereMachineConfig{}))
machineConfig := m.(*v1alpha1.VSphereMachineConfig)
tt.Expect(machineConfig.IsManaged()).To(BeTrue())
if machineConfig.Name == tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name {
tt.Expect(machineConfig.IsControlPlane()).To(BeTrue())
}
if machineConfig.Name == tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name {
tt.Expect(machineConfig.IsEtcd()).To(BeTrue())
}
}
}
func TestProviderGenerateDeploymentFileForBottleRocketWithNTPConfig(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_ntp_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
vscb, _ := newMockVSphereClientBuilder(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
v := NewValidator(govc, vscb)
govc.osTag = bottlerocketOSTag
provider := newProvider(
t,
datacenterConfig,
clusterSpec.Cluster,
govc,
kubectl,
v,
ipValidator,
)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_ntp_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_ntp_config_md.yaml")
}
func TestProviderGenerateDeploymentFileForUbuntuWithNTPConfig(t *testing.T) {
clusterSpecManifest := "cluster_ubuntu_ntp_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
vscb, _ := newMockVSphereClientBuilder(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
v := NewValidator(govc, vscb)
provider := newProvider(
t,
datacenterConfig,
clusterSpec.Cluster,
govc,
kubectl,
v,
ipValidator,
)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_ubuntu_ntp_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_ubuntu_ntp_config_md.yaml")
}
func TestProviderGenerateDeploymentFileForBottlerocketWithBottlerocketSettingsConfig(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_settings_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
vscb, _ := newMockVSphereClientBuilder(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
v := NewValidator(govc, vscb)
govc.osTag = bottlerocketOSTag
provider := newProvider(
t,
datacenterConfig,
clusterSpec.Cluster,
govc,
kubectl,
v,
ipValidator,
)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_settings_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_settings_config_md.yaml")
}
func TestProviderGenerateDeploymentFileForBottlerocketWithKernelConfig(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_kernel_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
vscb, _ := newMockVSphereClientBuilder(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
v := NewValidator(govc, vscb)
govc.osTag = bottlerocketOSTag
provider := newProvider(
t,
datacenterConfig,
clusterSpec.Cluster,
govc,
kubectl,
v,
ipValidator,
)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_kernel_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_kernel_config_md.yaml")
}
func TestProviderGenerateDeploymentFileForBottlerocketWithBootSettings(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_boot_settings_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
vscb, _ := newMockVSphereClientBuilder(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
v := NewValidator(govc, vscb)
govc.osTag = bottlerocketOSTag
provider := newProvider(
t,
datacenterConfig,
clusterSpec.Cluster,
govc,
kubectl,
v,
ipValidator,
)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_boot_settings_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_boot_settings_config_md.yaml")
}
func TestProviderGenerateDeploymentFileForBottlerocketWithTrustedCertBundles(t *testing.T) {
clusterSpecManifest := "cluster_bottlerocket_cert_bundles_config.yaml"
mockCtrl := gomock.NewController(t)
setupContext(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{Name: "test"}
clusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
ctx := context.Background()
govc := NewDummyProviderGovcClient()
vscb, _ := newMockVSphereClientBuilder(mockCtrl)
ipValidator := mocks.NewMockIPValidator(mockCtrl)
ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil)
v := NewValidator(govc, vscb)
govc.osTag = bottlerocketOSTag
provider := newProvider(
t,
datacenterConfig,
clusterSpec.Cluster,
govc,
kubectl,
v,
ipValidator,
)
if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil {
t.Fatalf("failed to setup and validate: %v", err)
}
cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
if err != nil {
t.Fatalf("failed to generate cluster api spec contents: %v", err)
}
test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_cert_bundles_config_cp.yaml")
test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_cert_bundles_config_md.yaml")
}
| 3,678 |
eks-anywhere | aws | Go |
package vsphere
import (
"context"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
vspherev1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
capiyaml "github.com/aws/eks-anywhere/pkg/clusterapi/yaml"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
type (
	// Workers represents the vSphere-specific CAPI spec for worker nodes.
Workers = clusterapi.Workers[*vspherev1.VSphereMachineTemplate]
workersBuilder = capiyaml.WorkersBuilder[*vspherev1.VSphereMachineTemplate]
)
// WorkersSpec generates a vSphere-specific CAPI spec for an eks-a cluster's worker nodes.
// It talks to the cluster with a client to detect changes in immutable objects and generates new
// names for them.
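// A minimal usage sketch (error handling shortened; names follow the parameters above):
//
//	workers, err := vsphere.WorkersSpec(ctx, logger, client, spec)
//	if err != nil {
//		return err
//	}
//	// workers.Groups holds one WorkerGroup per worker node group configuration.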
func WorkersSpec(ctx context.Context, logger logr.Logger, client kubernetes.Client, spec *cluster.Spec) (*Workers, error) {
// TODO(g-gaston): refactor template builder so it doesn't behave differently for controller and CLI
// TODO(g-gaston): do we need time.Now if the names are not dependent on a timestamp anymore?
templateBuilder := NewVsphereTemplateBuilder(time.Now)
workersYaml, err := templateBuilder.CAPIWorkersSpecWithInitialNames(spec)
if err != nil {
return nil, err
}
parser, builder, err := newWorkersParserAndBuilder(logger)
if err != nil {
return nil, err
}
if err = parser.Parse(workersYaml, builder); err != nil {
return nil, errors.Wrap(err, "parsing vSphere CAPI workers yaml")
}
workers := builder.Workers
if err = workers.UpdateImmutableObjectNames(ctx, client, getMachineTemplate, machineTemplateEqual); err != nil {
return nil, errors.Wrap(err, "updating vSphere worker immutable object names")
}
return workers, nil
}
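// newWorkersParserAndBuilder returns a yaml parser and a workers builder registered with the
// VSphereMachineTemplate mapping, so parsed CAPI objects can be grouped into Workers.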
func newWorkersParserAndBuilder(logger logr.Logger) (*yamlutil.Parser, *workersBuilder, error) {
parser, builder, err := capiyaml.NewWorkersParserAndBuilder(
logger,
machineTemplateMapping(),
)
if err != nil {
return nil, nil, errors.Wrap(err, "building vSphere workers parser and builder")
}
return parser, builder, nil
}
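// machineTemplateMapping tells the yaml parser how to construct an empty VSphereMachineTemplate
// for documents with kind "VSphereMachineTemplate".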
func machineTemplateMapping() yamlutil.Mapping[*vspherev1.VSphereMachineTemplate] {
return yamlutil.NewMapping(
"VSphereMachineTemplate",
func() *vspherev1.VSphereMachineTemplate {
return &vspherev1.VSphereMachineTemplate{}
},
)
}
| 73 |
eks-anywhere | aws | Go |
package vsphere_test
import (
"context"
"testing"
"time"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
vspherev1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
"github.com/aws/eks-anywhere/pkg/providers/vsphere"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestWorkersSpecNewCluster(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, "testdata/cluster_main_multiple_worker_node_groups.yaml")
client := test.NewFakeKubeClient()
workers, err := vsphere.WorkersSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(workers).NotTo(BeNil())
g.Expect(workers.Groups).To(HaveLen(2))
g.Expect(workers.Groups).To(ConsistOf(
clusterapi.WorkerGroup[*vspherev1.VSphereMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(),
MachineDeployment: machineDeployment(),
ProviderMachineTemplate: machineTemplate(),
},
clusterapi.WorkerGroup[*vspherev1.VSphereMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(
func(kct *bootstrapv1.KubeadmConfigTemplate) {
kct.Name = "test-md-1-1"
},
),
MachineDeployment: machineDeployment(
func(md *clusterv1.MachineDeployment) {
md.Name = "test-md-1"
md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1"
md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1"
md.Spec.Replicas = ptr.Int32(2)
},
),
ProviderMachineTemplate: machineTemplate(
func(vmt *vspherev1.VSphereMachineTemplate) {
vmt.Name = "test-md-1-1"
},
),
},
))
}
func TestWorkersSpecUpgradeCluster(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, "testdata/cluster_main_multiple_worker_node_groups.yaml")
oldGroup1 := &clusterapi.WorkerGroup[*vspherev1.VSphereMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(),
MachineDeployment: machineDeployment(),
ProviderMachineTemplate: machineTemplate(),
}
oldGroup2 := &clusterapi.WorkerGroup[*vspherev1.VSphereMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(
func(kct *bootstrapv1.KubeadmConfigTemplate) {
kct.Name = "test-md-1-1"
},
),
MachineDeployment: machineDeployment(
func(md *clusterv1.MachineDeployment) {
md.Name = "test-md-1"
md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1"
md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1"
md.Spec.Replicas = ptr.Int32(2)
},
),
ProviderMachineTemplate: machineTemplate(
func(vmt *vspherev1.VSphereMachineTemplate) {
vmt.Name = "test-md-1-1"
},
),
}
	// Always make copies before passing them to the client, since it modifies the API objects,
	// for example the ResourceVersion.
expectedGroup1 := oldGroup1.DeepCopy()
expectedGroup2 := oldGroup2.DeepCopy()
objs := make([]kubernetes.Object, 0, 6)
objs = append(objs, oldGroup1.Objects()...)
objs = append(objs, oldGroup2.Objects()...)
client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(objs)...)
	// This will cause a change in the vsphere machine templates, which are immutable
spec.VSphereMachineConfigs["test-wn"].Spec.NumCPUs = 10
	// This will cause a change in the KubeadmConfigTemplate, which we also treat as immutable
spec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Taints = []corev1.Taint{}
spec.Cluster.Spec.WorkerNodeGroupConfigurations[1].Taints = []corev1.Taint{}
expectedGroup1.MachineDeployment.Spec.Template.Spec.InfrastructureRef.Name = "test-md-0-2"
expectedGroup1.MachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-0-2"
expectedGroup1.KubeadmConfigTemplate.Name = "test-md-0-2"
expectedGroup1.KubeadmConfigTemplate.Spec.Template.Spec.JoinConfiguration.NodeRegistration.Taints = []corev1.Taint{}
expectedGroup1.ProviderMachineTemplate.Name = "test-md-0-2"
expectedGroup1.ProviderMachineTemplate.Spec.Template.Spec.NumCPUs = 10
expectedGroup2.MachineDeployment.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-2"
expectedGroup2.MachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-2"
expectedGroup2.KubeadmConfigTemplate.Name = "test-md-1-2"
expectedGroup2.KubeadmConfigTemplate.Spec.Template.Spec.JoinConfiguration.NodeRegistration.Taints = []corev1.Taint{}
expectedGroup2.ProviderMachineTemplate.Name = "test-md-1-2"
expectedGroup2.ProviderMachineTemplate.Spec.Template.Spec.NumCPUs = 10
workers, err := vsphere.WorkersSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(workers).NotTo(BeNil())
g.Expect(workers.Groups).To(HaveLen(2))
g.Expect(workers.Groups).To(ConsistOf(*expectedGroup1, *expectedGroup2))
}
func TestWorkersSpecUpgradeClusterNoMachineTemplateChanges(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, "testdata/cluster_main_multiple_worker_node_groups.yaml")
oldGroup1 := &clusterapi.WorkerGroup[*vspherev1.VSphereMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(),
MachineDeployment: machineDeployment(),
ProviderMachineTemplate: machineTemplate(),
}
oldGroup2 := &clusterapi.WorkerGroup[*vspherev1.VSphereMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(
func(kct *bootstrapv1.KubeadmConfigTemplate) {
kct.Name = "test-md-1-1"
},
),
MachineDeployment: machineDeployment(
func(md *clusterv1.MachineDeployment) {
md.Name = "test-md-1"
md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1"
md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1"
md.Spec.Replicas = ptr.Int32(2)
},
),
ProviderMachineTemplate: machineTemplate(
func(vmt *vspherev1.VSphereMachineTemplate) {
vmt.Name = "test-md-1-1"
},
),
}
	// Always make copies before passing them to the client, since it modifies the API objects,
	// for example the ResourceVersion.
expectedGroup1 := oldGroup1.DeepCopy()
expectedGroup2 := oldGroup2.DeepCopy()
	// This mimics what would happen if the objects were returned by a real API server.
	// It helps make sure that the immutable object comparison is able to deal with these
	// kinds of changes.
oldGroup1.ProviderMachineTemplate.CreationTimestamp = metav1.NewTime(time.Now())
oldGroup2.ProviderMachineTemplate.CreationTimestamp = metav1.NewTime(time.Now())
// This is testing defaults. We don't set Snapshot in our machine templates,
// but it's possible that some default logic does. We need to take this into
// consideration when checking for equality.
oldGroup1.ProviderMachineTemplate.Spec.Template.Spec.Snapshot = "current"
	oldGroup2.ProviderMachineTemplate.Spec.Template.Spec.Snapshot = "current"
client := test.NewFakeKubeClient(
oldGroup1.MachineDeployment,
oldGroup1.KubeadmConfigTemplate,
oldGroup1.ProviderMachineTemplate,
oldGroup2.MachineDeployment,
oldGroup2.KubeadmConfigTemplate,
oldGroup2.ProviderMachineTemplate,
)
workers, err := vsphere.WorkersSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(workers).NotTo(BeNil())
g.Expect(workers.Groups).To(HaveLen(2))
g.Expect(workers.Groups).To(ConsistOf(*expectedGroup1, *expectedGroup2))
}
func TestWorkersSpecErrorFromClient(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, "testdata/cluster_main_multiple_worker_node_groups.yaml")
client := test.NewFakeKubeClientAlwaysError()
_, err := vsphere.WorkersSpec(ctx, logger, client, spec)
g.Expect(err).To(MatchError(ContainSubstring("updating vSphere worker immutable object names")))
}
func TestWorkersSpecMachineTemplateNotFound(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, "testdata/cluster_main_multiple_worker_node_groups.yaml")
client := test.NewFakeKubeClient(machineDeployment())
_, err := vsphere.WorkersSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
}
func TestWorkersSpecRegistryMirrorConfiguration(t *testing.T) {
g := NewWithT(t)
logger := test.NewNullLogger()
ctx := context.Background()
spec := test.NewFullClusterSpec(t, "testdata/cluster_main_multiple_worker_node_groups.yaml")
client := test.NewFakeKubeClient()
tests := []struct {
name string
mirrorConfig *anywherev1.RegistryMirrorConfiguration
files []bootstrapv1.File
}{
{
name: "insecure skip verify",
mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabled(),
files: test.RegistryMirrorConfigFilesInsecureSkipVerify(),
},
{
name: "insecure skip verify with ca cert",
mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabledAndCACert(),
files: test.RegistryMirrorConfigFilesInsecureSkipVerifyAndCACert(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
spec.Cluster.Spec.RegistryMirrorConfiguration = tt.mirrorConfig
workers, err := vsphere.WorkersSpec(ctx, logger, client, spec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(workers).NotTo(BeNil())
g.Expect(workers.Groups).To(HaveLen(2))
g.Expect(workers.Groups).To(ConsistOf(
clusterapi.WorkerGroup[*vspherev1.VSphereMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) {
kct.Spec.Template.Spec.Files = append(kct.Spec.Template.Spec.Files, tt.files...)
kct.Spec.Template.Spec.PreKubeadmCommands = append(test.RegistryMirrorSudoPreKubeadmCommands(), kct.Spec.Template.Spec.PreKubeadmCommands...)
}),
MachineDeployment: machineDeployment(),
ProviderMachineTemplate: machineTemplate(),
},
clusterapi.WorkerGroup[*vspherev1.VSphereMachineTemplate]{
KubeadmConfigTemplate: kubeadmConfigTemplate(
func(kct *bootstrapv1.KubeadmConfigTemplate) {
kct.Name = "test-md-1-1"
kct.Spec.Template.Spec.Files = append(kct.Spec.Template.Spec.Files, tt.files...)
kct.Spec.Template.Spec.PreKubeadmCommands = append(test.RegistryMirrorSudoPreKubeadmCommands(), kct.Spec.Template.Spec.PreKubeadmCommands...)
},
),
MachineDeployment: machineDeployment(
func(md *clusterv1.MachineDeployment) {
md.Name = "test-md-1"
md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1"
md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1"
md.Spec.Replicas = ptr.Int32(2)
},
),
ProviderMachineTemplate: machineTemplate(
func(vmt *vspherev1.VSphereMachineTemplate) {
vmt.Name = "test-md-1-1"
},
),
},
))
})
}
}
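// machineDeployment returns the base CAPI MachineDeployment expected for the first worker
// node group in these tests; opts can mutate the default object for other groups.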
func machineDeployment(opts ...func(*clusterv1.MachineDeployment)) *clusterv1.MachineDeployment {
o := &clusterv1.MachineDeployment{
TypeMeta: metav1.TypeMeta{
Kind: "MachineDeployment",
APIVersion: "cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-md-0",
Namespace: "eksa-system",
Labels: map[string]string{"cluster.x-k8s.io/cluster-name": "test"},
},
Spec: clusterv1.MachineDeploymentSpec{
ClusterName: "test",
Replicas: ptr.Int32(3),
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{},
},
Template: clusterv1.MachineTemplateSpec{
ObjectMeta: clusterv1.ObjectMeta{
Labels: map[string]string{"cluster.x-k8s.io/cluster-name": "test"},
},
Spec: clusterv1.MachineSpec{
ClusterName: "test",
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &corev1.ObjectReference{
Kind: "KubeadmConfigTemplate",
Name: "test-md-0-1",
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
},
},
InfrastructureRef: corev1.ObjectReference{
Kind: "VSphereMachineTemplate",
Name: "test-md-0-1",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
Version: ptr.String("v1.19.8-eks-1-19-4"),
},
},
},
}
for _, opt := range opts {
opt(o)
}
return o
}
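// kubeadmConfigTemplate returns the base KubeadmConfigTemplate expected for the first worker
// node group in these tests; opts can mutate the default object.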
func kubeadmConfigTemplate(opts ...func(*bootstrapv1.KubeadmConfigTemplate)) *bootstrapv1.KubeadmConfigTemplate {
o := &bootstrapv1.KubeadmConfigTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmConfigTemplate",
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-md-0-1",
Namespace: "eksa-system",
},
Spec: bootstrapv1.KubeadmConfigTemplateSpec{
Template: bootstrapv1.KubeadmConfigTemplateResource{
Spec: bootstrapv1.KubeadmConfigSpec{
JoinConfiguration: &bootstrapv1.JoinConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
Name: "{{ ds.meta_data.hostname }}",
CRISocket: "/var/run/containerd/containerd.sock",
Taints: []corev1.Taint{
{
Key: "key2",
Value: "val2",
Effect: "PreferNoSchedule",
TimeAdded: nil,
},
},
KubeletExtraArgs: map[string]string{
"anonymous-auth": "false",
"cloud-provider": "external",
"read-only-port": "0",
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
},
},
},
PreKubeadmCommands: []string{
`hostname "{{ ds.meta_data.hostname }}"`,
`echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts`,
`echo "127.0.0.1 localhost" >>/etc/hosts`,
`echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts`,
`echo "{{ ds.meta_data.hostname }}" >/etc/hostname`,
},
Users: []bootstrapv1.User{
{
Name: "capv",
Sudo: ptr.String("ALL=(ALL) NOPASSWD:ALL"),
SSHAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="},
},
},
Format: bootstrapv1.Format("cloud-config"),
},
},
},
}
for _, opt := range opts {
opt(o)
}
return o
}
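// machineTemplate builds the base VSphereMachineTemplate expected by the vSphere workers tests; opts mutate the base object for individual cases.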
func machineTemplate(opts ...func(*vspherev1.VSphereMachineTemplate)) *vspherev1.VSphereMachineTemplate {
o := &vspherev1.VSphereMachineTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "VSphereMachineTemplate",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-md-0-1",
Namespace: "eksa-system",
},
Spec: vspherev1.VSphereMachineTemplateSpec{
Template: vspherev1.VSphereMachineTemplateResource{
Spec: vspherev1.VSphereMachineSpec{
VirtualMachineCloneSpec: vspherev1.VirtualMachineCloneSpec{
Template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6",
CloneMode: vspherev1.CloneMode("linkedClone"),
Server: "vsphere_server",
Thumbprint: "ABCDEFG",
Datacenter: "SDDC-Datacenter",
Folder: "/SDDC-Datacenter/vm",
Datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore",
StoragePolicyName: "vSAN Default Storage Policy",
ResourcePool: "*/Resources",
Network: vspherev1.NetworkSpec{
Devices: []vspherev1.NetworkDeviceSpec{
{
NetworkName: "/SDDC-Datacenter/network/sddc-cgw-network-1",
DHCP4: true,
},
},
},
NumCPUs: 3,
MemoryMiB: 4096,
DiskGiB: 25,
},
},
},
},
}
for _, opt := range opts {
opt(o)
}
return o
}
| 438 |
eks-anywhere | aws | Go | package tags
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/types"
)
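// Factory applies vSphere tags to templates through a GovcClient, creating missing categories and tags on demand.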
type Factory struct {
client GovcClient
}
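// GovcClient is the subset of govc operations needed to manage vSphere tags and categories.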
type GovcClient interface {
ListTags(ctx context.Context) ([]executables.Tag, error)
CreateTag(ctx context.Context, tag, category string) error
AddTag(ctx context.Context, path, tag string) error
ListCategories(ctx context.Context) ([]string, error)
CreateCategoryForVM(ctx context.Context, name string) error
}
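// NewFactory returns a tags Factory backed by the given GovcClient.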
func NewFactory(client GovcClient) *Factory {
return &Factory{client}
}
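// TagTemplate attaches every tag in tagsByCategory to the template at templatePath, creating any categories and tags that do not exist yet.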
func (f *Factory) TagTemplate(ctx context.Context, templatePath string, tagsByCategory map[string][]string) error {
logger.V(2).Info("Tagging template", "template", templatePath)
categories, err := f.client.ListCategories(ctx)
if err != nil {
return fmt.Errorf("failed listing vsphere categories: %v", err)
}
tags, err := f.client.ListTags(ctx)
if err != nil {
return fmt.Errorf("failed listing vsphere tags: %v", err)
}
tagNames := make([]string, 0, len(tags))
for _, t := range tags {
tagNames = append(tagNames, t.Name)
}
categoriesLookup := types.SliceToLookup(categories)
tagsLookup := types.SliceToLookup(tagNames)
for category, tags := range tagsByCategory {
if !categoriesLookup.IsPresent(category) {
logger.V(3).Info("Creating category", "category", category)
if err = f.client.CreateCategoryForVM(ctx, category); err != nil {
return fmt.Errorf("failed creating category for tags: %v", err)
}
}
for _, tag := range tags {
if !tagsLookup.IsPresent(tag) {
logger.V(3).Info("Creating tag", "tag", tag, "category", category)
if err = f.client.CreateTag(ctx, tag, category); err != nil {
return fmt.Errorf("failed creating tag before tagging template: %v", err)
}
}
logger.V(3).Info("Adding tag to template", "tag", tag, "template", templatePath)
if err = f.client.AddTag(ctx, templatePath, tag); err != nil {
return fmt.Errorf("failed tagging template: %v", err)
}
}
}
return nil
}
| 72 |
eks-anywhere | aws | Go | package tags_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/internal/tags"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/internal/tags/mocks"
)
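// test is the shared fixture for tags.Factory tests: a gomock GovcClient wired into the Factory under test.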
type test struct {
t *testing.T
govc *mocks.MockGovcClient
factory *tags.Factory
ctx context.Context
dummyError error
}
type tagTest struct {
*test
templatePath string
tagsByCategory map[string][]string
}
func newTest(t *testing.T) *test {
ctrl := gomock.NewController(t)
test := &test{
t: t,
govc: mocks.NewMockGovcClient(ctrl),
ctx: context.Background(),
dummyError: errors.New("error from govc"),
}
f := tags.NewFactory(test.govc)
test.factory = f
return test
}
func newTagTest(t *testing.T) *tagTest {
test := newTest(t)
return &tagTest{
test: test,
templatePath: "/SDDC-Datacenter/vm/Templates/ubuntu-v1.19.8-eks-d-1-19-4-eks-a-0.0.1.build.38-amd64",
tagsByCategory: map[string][]string{
"kubernetesChannel": {"kubernetesChannel:1.19"},
"eksd": {"eksd:1.19", "eksd:1.19.4"},
},
}
}
func (tt *tagTest) tagTemplate() error {
return tt.factory.TagTemplate(tt.ctx, tt.templatePath, tt.tagsByCategory)
}
func (tt *tagTest) assertErrorFromTagTemplate() {
if err := tt.tagTemplate(); err == nil {
tt.t.Fatal("factory.TagTemplate() err = nil, want err not nil")
}
}
func (tt *tagTest) assertSuccessFromTagTemplate() {
if err := tt.tagTemplate(); err != nil {
tt.t.Fatalf("factory.TagTemplate() err = %v, want err = nil", err)
}
}
func TestFactoryTagTemplateErrorListCategories(t *testing.T) {
tt := newTagTest(t)
tt.govc.EXPECT().ListCategories(tt.ctx).Return(nil, tt.dummyError)
tt.assertErrorFromTagTemplate()
}
func TestFactoryTagTemplateErrorListTags(t *testing.T) {
tt := newTagTest(t)
tt.govc.EXPECT().ListCategories(tt.ctx).Return(nil, nil)
tt.govc.EXPECT().ListTags(tt.ctx).Return(nil, tt.dummyError)
tt.assertErrorFromTagTemplate()
}
func TestFactoryTagTemplateErrorCreateCategoryForVM(t *testing.T) {
tt := newTagTest(t)
tt.govc.EXPECT().ListCategories(tt.ctx).Return(nil, nil)
tt.govc.EXPECT().ListTags(tt.ctx).Return(nil, nil)
tt.govc.EXPECT().CreateCategoryForVM(tt.ctx, gomock.Any()).Return(tt.dummyError)
tt.assertErrorFromTagTemplate()
}
func TestFactoryTagTemplateErrorCreateTag(t *testing.T) {
tt := newTagTest(t)
tt.govc.EXPECT().ListCategories(tt.ctx).Return(nil, nil)
tt.govc.EXPECT().ListTags(tt.ctx).Return(nil, nil)
tt.govc.EXPECT().CreateCategoryForVM(tt.ctx, gomock.Any()).Return(nil)
tt.govc.EXPECT().CreateTag(tt.ctx, gomock.Any(), gomock.Any()).Return(tt.dummyError)
tt.assertErrorFromTagTemplate()
}
func TestFactoryTagTemplateErrorAddTag(t *testing.T) {
tt := newTagTest(t)
tt.govc.EXPECT().ListCategories(tt.ctx).Return(nil, nil)
tt.govc.EXPECT().ListTags(tt.ctx).Return(nil, nil)
tt.govc.EXPECT().CreateCategoryForVM(tt.ctx, gomock.Any()).Return(nil)
tt.govc.EXPECT().CreateTag(tt.ctx, gomock.Any(), gomock.Any()).Return(nil)
tt.govc.EXPECT().AddTag(tt.ctx, tt.templatePath, gomock.Any()).Return(tt.dummyError)
tt.assertErrorFromTagTemplate()
}
func TestFactoryTagTemplateSuccess(t *testing.T) {
tt := newTagTest(t)
tt.govc.EXPECT().ListCategories(tt.ctx).Return([]string{"kubernetesChannel"}, nil)
tags := []executables.Tag{
{
Name: "eksd:1.19",
Id: "urn:vmomi:InventoryServiceTag:5555:GLOBAL",
CategoryId: "eksd",
},
}
tt.govc.EXPECT().ListTags(tt.ctx).Return(tags, nil)
tt.govc.EXPECT().CreateTag(tt.ctx, "kubernetesChannel:1.19", "kubernetesChannel").Return(nil)
tt.govc.EXPECT().AddTag(tt.ctx, tt.templatePath, "kubernetesChannel:1.19").Return(nil)
tt.govc.EXPECT().CreateCategoryForVM(tt.ctx, "eksd").Return(nil)
tt.govc.EXPECT().AddTag(tt.ctx, tt.templatePath, "eksd:1.19").Return(nil)
tt.govc.EXPECT().CreateTag(tt.ctx, "eksd:1.19.4", "eksd").Return(nil)
tt.govc.EXPECT().AddTag(tt.ctx, tt.templatePath, "eksd:1.19.4").Return(nil)
tt.assertSuccessFromTagTemplate()
}
| 136 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/providers/vsphere/internal/tags/factory.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
executables "github.com/aws/eks-anywhere/pkg/executables"
gomock "github.com/golang/mock/gomock"
)
// MockGovcClient is a mock of GovcClient interface.
type MockGovcClient struct {
ctrl *gomock.Controller
recorder *MockGovcClientMockRecorder
}
// MockGovcClientMockRecorder is the mock recorder for MockGovcClient.
type MockGovcClientMockRecorder struct {
mock *MockGovcClient
}
// NewMockGovcClient creates a new mock instance.
func NewMockGovcClient(ctrl *gomock.Controller) *MockGovcClient {
mock := &MockGovcClient{ctrl: ctrl}
mock.recorder = &MockGovcClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGovcClient) EXPECT() *MockGovcClientMockRecorder {
return m.recorder
}
// AddTag mocks base method.
func (m *MockGovcClient) AddTag(ctx context.Context, path, tag string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddTag", ctx, path, tag)
ret0, _ := ret[0].(error)
return ret0
}
// AddTag indicates an expected call of AddTag.
func (mr *MockGovcClientMockRecorder) AddTag(ctx, path, tag interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTag", reflect.TypeOf((*MockGovcClient)(nil).AddTag), ctx, path, tag)
}
// CreateCategoryForVM mocks base method.
func (m *MockGovcClient) CreateCategoryForVM(ctx context.Context, name string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateCategoryForVM", ctx, name)
ret0, _ := ret[0].(error)
return ret0
}
// CreateCategoryForVM indicates an expected call of CreateCategoryForVM.
func (mr *MockGovcClientMockRecorder) CreateCategoryForVM(ctx, name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateCategoryForVM", reflect.TypeOf((*MockGovcClient)(nil).CreateCategoryForVM), ctx, name)
}
// CreateTag mocks base method.
func (m *MockGovcClient) CreateTag(ctx context.Context, tag, category string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateTag", ctx, tag, category)
ret0, _ := ret[0].(error)
return ret0
}
// CreateTag indicates an expected call of CreateTag.
func (mr *MockGovcClientMockRecorder) CreateTag(ctx, tag, category interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTag", reflect.TypeOf((*MockGovcClient)(nil).CreateTag), ctx, tag, category)
}
// ListCategories mocks base method.
func (m *MockGovcClient) ListCategories(ctx context.Context) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListCategories", ctx)
ret0, _ := ret[0].([]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListCategories indicates an expected call of ListCategories.
func (mr *MockGovcClientMockRecorder) ListCategories(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListCategories", reflect.TypeOf((*MockGovcClient)(nil).ListCategories), ctx)
}
// ListTags mocks base method.
func (m *MockGovcClient) ListTags(ctx context.Context) ([]executables.Tag, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListTags", ctx)
ret0, _ := ret[0].([]executables.Tag)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListTags indicates an expected call of ListTags.
func (mr *MockGovcClientMockRecorder) ListTags(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTags", reflect.TypeOf((*MockGovcClient)(nil).ListTags), ctx)
}
| 109 |
eks-anywhere | aws | Go | package templates
import (
"context"
"fmt"
"path/filepath"
"strings"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/internal/tags"
)
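// Content version values reported for a library element, used to detect a corrupted upload ("1") or a missing element ("-1").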
const (
libraryContentCorrupted = "1"
libraryContentDoesNotExist = "-1"
)
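// Factory creates vSphere VM templates from OVAs, importing them into a content library and tagging the result.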
type Factory struct {
client GovcClient
datacenter string
datastore string
network string
resourcePool string
templateLibrary string
tagsFactory *tags.Factory
}
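// GovcClient is the set of govc operations the templates Factory depends on.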
type GovcClient interface {
CreateLibrary(ctx context.Context, datastore, library string) error
DeployTemplateFromLibrary(ctx context.Context, templateDir, templateName, library, datacenter, datastore, network, resourcePool string, resizeBRDisk bool) error
SearchTemplate(ctx context.Context, datacenter, template string) (string, error)
ImportTemplate(ctx context.Context, library, ovaURL, name string) error
LibraryElementExists(ctx context.Context, library string) (bool, error)
GetLibraryElementContentVersion(ctx context.Context, element string) (string, error)
DeleteLibraryElement(ctx context.Context, element string) error
ListTags(ctx context.Context) ([]executables.Tag, error)
CreateTag(ctx context.Context, tag, category string) error
AddTag(ctx context.Context, path, tag string) error
ListCategories(ctx context.Context) ([]string, error)
CreateCategoryForVM(ctx context.Context, name string) error
CreateUser(ctx context.Context, username string, password string) error
UserExists(ctx context.Context, username string) (bool, error)
CreateGroup(ctx context.Context, name string) error
GroupExists(ctx context.Context, name string) (bool, error)
AddUserToGroup(ctx context.Context, name string, username string) error
RoleExists(ctx context.Context, name string) (bool, error)
CreateRole(ctx context.Context, name string, privileges []string) error
SetGroupRoleOnObject(ctx context.Context, principal string, role string, object string, domain string) error
}
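// NewFactory returns a templates Factory configured for the given datacenter, datastore, network, resource pool and template library.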
func NewFactory(client GovcClient, datacenter, datastore, network, resourcePool, templateLibrary string) *Factory {
return &Factory{
client: client,
datacenter: datacenter,
datastore: datastore,
network: network,
resourcePool: resourcePool,
templateLibrary: templateLibrary,
tagsFactory: tags.NewFactory(client),
}
}
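// CreateIfMissing deploys the template referenced by machineConfig from ovaURL when it is not already present in the datacenter, then tags it with tagsByCategory. If the template already exists, its full path is written back to machineConfig.Spec.Template and nothing else is done.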
func (f *Factory) CreateIfMissing(ctx context.Context, datacenter string, machineConfig *v1alpha1.VSphereMachineConfig, ovaURL string, tagsByCategory map[string][]string) error {
templateFullPath, err := f.client.SearchTemplate(ctx, datacenter, machineConfig.Spec.Template)
if err != nil {
return fmt.Errorf("checking for template: %v", err)
}
if len(templateFullPath) > 0 {
machineConfig.Spec.Template = templateFullPath // TODO: move this out of the factory into the defaulter, it's a side effect
logger.V(2).Info("Template already exists. Skipping creation", "template", machineConfig.Spec.Template)
return nil
}
logger.V(2).Info("Template not available. Creating", "template", machineConfig.Spec.Template)
osFamily := machineConfig.Spec.OSFamily
if err = f.createTemplate(ctx, machineConfig.Spec.Template, ovaURL, string(osFamily)); err != nil {
return err
}
if err = f.tagsFactory.TagTemplate(ctx, machineConfig.Spec.Template, tagsByCategory); err != nil {
return err
}
return nil
}
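// createTemplate imports the OVA into the content library (creating the library if needed) and deploys it as a VM template at templatePath.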
func (f *Factory) createTemplate(ctx context.Context, templatePath, ovaURL, osFamily string) error {
if err := f.createLibraryIfMissing(ctx); err != nil {
return err
}
logger.Info("Creating template. This might take a while.") // TODO: add rough estimate timing?
templateName := filepath.Base(templatePath)
templateDir := filepath.Dir(templatePath)
if err := f.importOVAIfMissing(ctx, templateName, ovaURL); err != nil {
return err
}
resizeBRDisk := strings.EqualFold(osFamily, string(v1alpha1.Bottlerocket))
if err := f.client.DeployTemplateFromLibrary(ctx, templateDir, templateName, f.templateLibrary, f.datacenter, f.datastore, f.network, f.resourcePool, resizeBRDisk); err != nil {
return fmt.Errorf("failed deploying template: %v", err)
}
return nil
}
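// createLibraryIfMissing creates the template content library on the configured datastore if it does not already exist.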
func (f *Factory) createLibraryIfMissing(ctx context.Context) error {
libraryExists, err := f.client.LibraryElementExists(ctx, f.templateLibrary)
if err != nil {
return fmt.Errorf("failed to validate library for new template: %v", err)
}
if !libraryExists {
logger.V(2).Info("Creating library", "library", f.templateLibrary)
if err = f.client.CreateLibrary(ctx, f.datastore, f.templateLibrary); err != nil {
return fmt.Errorf("failed creating library for new template: %v", err)
}
}
return nil
}
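// importOVAIfMissing imports the OVA into the content library unless a valid element with the same name is already there, deleting and re-importing elements whose content is corrupted.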
func (f *Factory) importOVAIfMissing(ctx context.Context, templateName, ovaURL string) error {
contentVersion, err := f.client.GetLibraryElementContentVersion(ctx, filepath.Join(f.templateLibrary, templateName))
if err != nil {
return fmt.Errorf("failed to validate template in library for new template: %v", err)
}
if contentVersion == libraryContentCorrupted {
err := f.client.DeleteLibraryElement(ctx, filepath.Join(f.templateLibrary, templateName))
if err != nil {
return fmt.Errorf("failed to delete old template in library: %v", err)
}
contentVersion = libraryContentDoesNotExist
}
if contentVersion == libraryContentDoesNotExist {
logger.V(2).Info("Importing template from ova url", "ova", ovaURL)
if err = f.client.ImportTemplate(ctx, f.templateLibrary, ovaURL, templateName); err != nil {
return fmt.Errorf("failed importing template into library: %v", err)
}
}
return nil
}
| 152 |
eks-anywhere | aws | Go | package templates_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/internal/templates"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/internal/templates/mocks"
)
type test struct {
t *testing.T
datacenter string
datastore string
network string
resourcePool string
templateLibrary string
resizeDisk2 bool
govc *mocks.MockGovcClient
factory *templates.Factory
ctx context.Context
dummyError error
libraryContentCorrupted string
libraryContentValid string
libraryContentDoesNotExist string
}
type createTest struct {
*test
datacenter string
machineConfig *v1alpha1.VSphereMachineConfig
templatePath string
templateName string
templateDir string
templateInLibrary string
ovaURL string
tagsByCategory map[string][]string
}
func newTest(t *testing.T) *test {
ctrl := gomock.NewController(t)
test := &test{
t: t,
datacenter: "SDDC-Datacenter",
datastore: "datastore",
network: "sddc-cgw-network-1",
resourcePool: "*/pool/",
templateLibrary: "library",
resizeDisk2: false,
govc: mocks.NewMockGovcClient(ctrl),
ctx: context.Background(),
dummyError: errors.New("error from govc"),
libraryContentCorrupted: "1",
libraryContentValid: "2",
libraryContentDoesNotExist: "-1",
}
f := templates.NewFactory(
test.govc,
test.datacenter,
test.datastore,
test.network,
test.resourcePool,
test.templateLibrary,
)
test.factory = f
return test
}
func newMachineConfig(t *testing.T) *v1alpha1.VSphereMachineConfig {
return &v1alpha1.VSphereMachineConfig{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.VSphereMachineConfigKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: v1alpha1.VSphereMachineConfigSpec{
Template: "/SDDC-Datacenter/vm/Templates/ubuntu-v1.19.8-eks-d-1-19-4-eks-a-0.0.1.build.38-amd64",
OSFamily: "ubuntu",
},
}
}
func newCreateTest(t *testing.T) *createTest {
test := newTest(t)
return &createTest{
test: test,
datacenter: "SDDC-Datacenter",
machineConfig: newMachineConfig(t),
templatePath: "/SDDC-Datacenter/vm/Templates/ubuntu-v1.19.8-eks-d-1-19-4-eks-a-0.0.1.build.38-amd64",
templateDir: "/SDDC-Datacenter/vm/Templates",
templateName: "ubuntu-v1.19.8-eks-d-1-19-4-eks-a-0.0.1.build.38-amd64",
templateInLibrary: "library/ubuntu-v1.19.8-eks-d-1-19-4-eks-a-0.0.1.build.38-amd64",
ovaURL: "https://amazonaws.com/artifacts/0.0.1/eks-distro/ova/1-19/1-19-4/ubuntu-v1.19.8-eks-d-1-19-4-eks-a-0.0.1.build.38-amd64.ova",
tagsByCategory: map[string][]string{},
}
}
func (ct *createTest) createIfMissing() error {
return ct.factory.CreateIfMissing(ct.ctx, ct.datacenter, ct.machineConfig, ct.ovaURL, ct.tagsByCategory)
}
func (ct *createTest) assertErrorFromCreateIfMissing() {
if err := ct.createIfMissing(); err == nil {
ct.t.Fatal("factory.CreateIfMissing() err = nil, want err not nil")
}
}
func (ct *createTest) assertSuccessFromCreateIfMissing() {
if err := ct.createIfMissing(); err != nil {
ct.t.Fatalf("factory.CreateIfMissing() err = %v, want err = nil", err)
}
}
func TestFactoryCreateIfMissingSearchTemplate(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return(ct.machineConfig.Spec.Template, nil)
ct.assertSuccessFromCreateIfMissing()
}
func TestFactoryCreateIfMissingErrorSearchTemplate(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return("", ct.dummyError) // error getting template
ct.assertErrorFromCreateIfMissing()
}
func TestFactoryCreateIfMissingErrorLibraryElementExists(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return("", nil) // template not present
ct.govc.EXPECT().LibraryElementExists(ct.ctx, ct.templateLibrary).Return(false, ct.dummyError)
ct.assertErrorFromCreateIfMissing()
}
func TestFactoryCreateIfMissingErrorCreateLibrary(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return("", nil) // template not present
ct.govc.EXPECT().LibraryElementExists(ct.ctx, ct.templateLibrary).Return(false, nil)
ct.govc.EXPECT().CreateLibrary(ct.ctx, ct.datastore, ct.templateLibrary).Return(ct.dummyError)
ct.assertErrorFromCreateIfMissing()
}
func TestFactoryCreateIfMissingErrorTemplateExistsInLibrary(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return("", nil) // template not present
ct.govc.EXPECT().LibraryElementExists(ct.ctx, ct.templateLibrary).Return(false, nil)
ct.govc.EXPECT().CreateLibrary(ct.ctx, ct.datastore, ct.templateLibrary).Return(nil)
ct.govc.EXPECT().GetLibraryElementContentVersion(ct.ctx, ct.templateInLibrary).Return("", ct.dummyError)
ct.assertErrorFromCreateIfMissing()
}
func TestFactoryCreateIfMissingErrorImport(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return("", nil) // template not present
ct.govc.EXPECT().LibraryElementExists(ct.ctx, ct.templateLibrary).Return(false, nil)
ct.govc.EXPECT().CreateLibrary(ct.ctx, ct.datastore, ct.templateLibrary).Return(nil)
ct.govc.EXPECT().GetLibraryElementContentVersion(ct.ctx, ct.templateInLibrary).Return(ct.libraryContentDoesNotExist, nil)
ct.govc.EXPECT().ImportTemplate(ct.ctx, ct.templateLibrary, ct.ovaURL, ct.templateName).Return(ct.dummyError)
ct.assertErrorFromCreateIfMissing()
}
func TestFactoryCreateIfMissingErrorDeploy(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return("", nil) // template not present
ct.govc.EXPECT().LibraryElementExists(ct.ctx, ct.templateLibrary).Return(false, nil)
ct.govc.EXPECT().CreateLibrary(ct.ctx, ct.datastore, ct.templateLibrary).Return(nil)
ct.govc.EXPECT().GetLibraryElementContentVersion(ct.ctx, ct.templateInLibrary).Return(ct.libraryContentDoesNotExist, nil)
ct.govc.EXPECT().ImportTemplate(ct.ctx, ct.templateLibrary, ct.ovaURL, ct.templateName).Return(nil)
ct.govc.EXPECT().DeployTemplateFromLibrary(
ct.ctx, ct.templateDir, ct.templateName, ct.templateLibrary, ct.datacenter, ct.datastore, ct.network, ct.resourcePool, ct.resizeDisk2,
).Return(ct.dummyError)
ct.assertErrorFromCreateIfMissing()
}
func TestFactoryCreateIfMissingErrorFromTagFactory(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return("", nil) // template not present
ct.govc.EXPECT().LibraryElementExists(ct.ctx, ct.templateLibrary).Return(false, nil)
ct.govc.EXPECT().CreateLibrary(ct.ctx, ct.datastore, ct.templateLibrary).Return(nil)
ct.govc.EXPECT().GetLibraryElementContentVersion(ct.ctx, ct.templateInLibrary).Return(ct.libraryContentDoesNotExist, nil)
ct.govc.EXPECT().ImportTemplate(ct.ctx, ct.templateLibrary, ct.ovaURL, ct.templateName).Return(nil)
ct.govc.EXPECT().DeployTemplateFromLibrary(
ct.ctx, ct.templateDir, ct.templateName, ct.templateLibrary, ct.datacenter, ct.datastore, ct.network, ct.resourcePool, ct.resizeDisk2,
).Return(nil)
// expects for tagging
ct.govc.EXPECT().ListCategories(ct.ctx).Return(nil, ct.dummyError)
ct.assertErrorFromCreateIfMissing()
}
func TestFactoryCreateIfMissingSuccessLibraryDoesNotExist(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return("", nil) // template not present
ct.govc.EXPECT().LibraryElementExists(ct.ctx, ct.templateLibrary).Return(false, nil)
ct.govc.EXPECT().CreateLibrary(ct.ctx, ct.datastore, ct.templateLibrary).Return(nil)
ct.govc.EXPECT().GetLibraryElementContentVersion(ct.ctx, ct.templateInLibrary).Return(ct.libraryContentDoesNotExist, nil)
ct.govc.EXPECT().ImportTemplate(ct.ctx, ct.templateLibrary, ct.ovaURL, ct.templateName).Return(nil)
ct.govc.EXPECT().DeployTemplateFromLibrary(
ct.ctx, ct.templateDir, ct.templateName, ct.templateLibrary, ct.datacenter, ct.datastore, ct.network, ct.resourcePool, ct.resizeDisk2,
).Return(nil)
// expects for tagging
ct.govc.EXPECT().ListCategories(ct.ctx).Return(nil, nil)
ct.govc.EXPECT().ListTags(ct.ctx).Return(nil, nil)
ct.assertSuccessFromCreateIfMissing()
}
func TestFactoryCreateIfMissingSuccessLibraryExists(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return("", nil) // template not present
ct.govc.EXPECT().LibraryElementExists(ct.ctx, ct.templateLibrary).Return(true, nil)
ct.govc.EXPECT().GetLibraryElementContentVersion(ct.ctx, ct.templateInLibrary).Return(ct.libraryContentDoesNotExist, nil)
ct.govc.EXPECT().ImportTemplate(ct.ctx, ct.templateLibrary, ct.ovaURL, ct.templateName).Return(nil)
ct.govc.EXPECT().DeployTemplateFromLibrary(
ct.ctx, ct.templateDir, ct.templateName, ct.templateLibrary, ct.datacenter, ct.datastore, ct.network, ct.resourcePool, ct.resizeDisk2,
).Return(nil)
// expects for tagging
ct.govc.EXPECT().ListCategories(ct.ctx).Return(nil, nil)
ct.govc.EXPECT().ListTags(ct.ctx).Return(nil, nil)
ct.assertSuccessFromCreateIfMissing()
}
func TestFactoryCreateIfMissingSuccessTemplateInLibraryExists(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return("", nil) // template not present
ct.govc.EXPECT().LibraryElementExists(ct.ctx, ct.templateLibrary).Return(true, nil)
ct.govc.EXPECT().GetLibraryElementContentVersion(ct.ctx, ct.templateInLibrary).Return(ct.libraryContentValid, nil)
ct.govc.EXPECT().DeployTemplateFromLibrary(
ct.ctx, ct.templateDir, ct.templateName, ct.templateLibrary, ct.datacenter, ct.datastore, ct.network, ct.resourcePool, ct.resizeDisk2,
).Return(nil)
// expects for tagging
ct.govc.EXPECT().ListCategories(ct.ctx).Return(nil, nil)
ct.govc.EXPECT().ListTags(ct.ctx).Return(nil, nil)
ct.assertSuccessFromCreateIfMissing()
}
func TestFactoryCreateIfMissingSuccessTemplateInLibraryCorrupted(t *testing.T) {
ct := newCreateTest(t)
ct.govc.EXPECT().SearchTemplate(ct.ctx, ct.datacenter, ct.machineConfig.Spec.Template).Return("", nil) // template not present
ct.govc.EXPECT().LibraryElementExists(ct.ctx, ct.templateLibrary).Return(true, nil)
ct.govc.EXPECT().GetLibraryElementContentVersion(ct.ctx, ct.templateInLibrary).Return(ct.libraryContentCorrupted, nil)
ct.govc.EXPECT().DeleteLibraryElement(ct.ctx, ct.templateInLibrary).Return(nil)
ct.govc.EXPECT().ImportTemplate(ct.ctx, ct.templateLibrary, ct.ovaURL, ct.templateName)
ct.govc.EXPECT().DeployTemplateFromLibrary(
ct.ctx, ct.templateDir, ct.templateName, ct.templateLibrary, ct.datacenter, ct.datastore, ct.network, ct.resourcePool, ct.resizeDisk2,
).Return(nil)
// expects for tagging
ct.govc.EXPECT().ListCategories(ct.ctx).Return(nil, nil)
ct.govc.EXPECT().ListTags(ct.ctx).Return(nil, nil)
ct.assertSuccessFromCreateIfMissing()
}
| 270 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/providers/vsphere/internal/templates/factory.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
executables "github.com/aws/eks-anywhere/pkg/executables"
gomock "github.com/golang/mock/gomock"
)
// MockGovcClient is a mock of GovcClient interface.
type MockGovcClient struct {
ctrl *gomock.Controller
recorder *MockGovcClientMockRecorder
}
// MockGovcClientMockRecorder is the mock recorder for MockGovcClient.
type MockGovcClientMockRecorder struct {
mock *MockGovcClient
}
// NewMockGovcClient creates a new mock instance.
func NewMockGovcClient(ctrl *gomock.Controller) *MockGovcClient {
mock := &MockGovcClient{ctrl: ctrl}
mock.recorder = &MockGovcClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGovcClient) EXPECT() *MockGovcClientMockRecorder {
return m.recorder
}
// AddTag mocks base method.
func (m *MockGovcClient) AddTag(ctx context.Context, path, tag string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddTag", ctx, path, tag)
ret0, _ := ret[0].(error)
return ret0
}
// AddTag indicates an expected call of AddTag.
func (mr *MockGovcClientMockRecorder) AddTag(ctx, path, tag interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTag", reflect.TypeOf((*MockGovcClient)(nil).AddTag), ctx, path, tag)
}
// AddUserToGroup mocks base method.
func (m *MockGovcClient) AddUserToGroup(ctx context.Context, name, username string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddUserToGroup", ctx, name, username)
ret0, _ := ret[0].(error)
return ret0
}
// AddUserToGroup indicates an expected call of AddUserToGroup.
func (mr *MockGovcClientMockRecorder) AddUserToGroup(ctx, name, username interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUserToGroup", reflect.TypeOf((*MockGovcClient)(nil).AddUserToGroup), ctx, name, username)
}
// CreateCategoryForVM mocks base method.
func (m *MockGovcClient) CreateCategoryForVM(ctx context.Context, name string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateCategoryForVM", ctx, name)
ret0, _ := ret[0].(error)
return ret0
}
// CreateCategoryForVM indicates an expected call of CreateCategoryForVM.
func (mr *MockGovcClientMockRecorder) CreateCategoryForVM(ctx, name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateCategoryForVM", reflect.TypeOf((*MockGovcClient)(nil).CreateCategoryForVM), ctx, name)
}
// CreateGroup mocks base method.
func (m *MockGovcClient) CreateGroup(ctx context.Context, name string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateGroup", ctx, name)
ret0, _ := ret[0].(error)
return ret0
}
// CreateGroup indicates an expected call of CreateGroup.
func (mr *MockGovcClientMockRecorder) CreateGroup(ctx, name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGroup", reflect.TypeOf((*MockGovcClient)(nil).CreateGroup), ctx, name)
}
// CreateLibrary mocks base method.
func (m *MockGovcClient) CreateLibrary(ctx context.Context, datastore, library string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateLibrary", ctx, datastore, library)
ret0, _ := ret[0].(error)
return ret0
}
// CreateLibrary indicates an expected call of CreateLibrary.
func (mr *MockGovcClientMockRecorder) CreateLibrary(ctx, datastore, library interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLibrary", reflect.TypeOf((*MockGovcClient)(nil).CreateLibrary), ctx, datastore, library)
}
// CreateRole mocks base method.
func (m *MockGovcClient) CreateRole(ctx context.Context, name string, privileges []string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateRole", ctx, name, privileges)
ret0, _ := ret[0].(error)
return ret0
}
// CreateRole indicates an expected call of CreateRole.
func (mr *MockGovcClientMockRecorder) CreateRole(ctx, name, privileges interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRole", reflect.TypeOf((*MockGovcClient)(nil).CreateRole), ctx, name, privileges)
}
// CreateTag mocks base method.
func (m *MockGovcClient) CreateTag(ctx context.Context, tag, category string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateTag", ctx, tag, category)
ret0, _ := ret[0].(error)
return ret0
}
// CreateTag indicates an expected call of CreateTag.
func (mr *MockGovcClientMockRecorder) CreateTag(ctx, tag, category interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTag", reflect.TypeOf((*MockGovcClient)(nil).CreateTag), ctx, tag, category)
}
// CreateUser mocks base method.
func (m *MockGovcClient) CreateUser(ctx context.Context, username, password string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateUser", ctx, username, password)
ret0, _ := ret[0].(error)
return ret0
}
// CreateUser indicates an expected call of CreateUser.
func (mr *MockGovcClientMockRecorder) CreateUser(ctx, username, password interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateUser", reflect.TypeOf((*MockGovcClient)(nil).CreateUser), ctx, username, password)
}
// DeleteLibraryElement mocks base method.
func (m *MockGovcClient) DeleteLibraryElement(ctx context.Context, element string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteLibraryElement", ctx, element)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteLibraryElement indicates an expected call of DeleteLibraryElement.
func (mr *MockGovcClientMockRecorder) DeleteLibraryElement(ctx, element interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLibraryElement", reflect.TypeOf((*MockGovcClient)(nil).DeleteLibraryElement), ctx, element)
}
// DeployTemplateFromLibrary mocks base method.
func (m *MockGovcClient) DeployTemplateFromLibrary(ctx context.Context, templateDir, templateName, library, datacenter, datastore, network, resourcePool string, resizeBRDisk bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeployTemplateFromLibrary", ctx, templateDir, templateName, library, datacenter, datastore, network, resourcePool, resizeBRDisk)
ret0, _ := ret[0].(error)
return ret0
}
// DeployTemplateFromLibrary indicates an expected call of DeployTemplateFromLibrary.
func (mr *MockGovcClientMockRecorder) DeployTemplateFromLibrary(ctx, templateDir, templateName, library, datacenter, datastore, network, resourcePool, resizeBRDisk interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeployTemplateFromLibrary", reflect.TypeOf((*MockGovcClient)(nil).DeployTemplateFromLibrary), ctx, templateDir, templateName, library, datacenter, datastore, network, resourcePool, resizeBRDisk)
}
// GetLibraryElementContentVersion mocks base method.
func (m *MockGovcClient) GetLibraryElementContentVersion(ctx context.Context, element string) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetLibraryElementContentVersion", ctx, element)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetLibraryElementContentVersion indicates an expected call of GetLibraryElementContentVersion.
func (mr *MockGovcClientMockRecorder) GetLibraryElementContentVersion(ctx, element interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLibraryElementContentVersion", reflect.TypeOf((*MockGovcClient)(nil).GetLibraryElementContentVersion), ctx, element)
}
// GroupExists mocks base method.
func (m *MockGovcClient) GroupExists(ctx context.Context, name string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GroupExists", ctx, name)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GroupExists indicates an expected call of GroupExists.
func (mr *MockGovcClientMockRecorder) GroupExists(ctx, name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GroupExists", reflect.TypeOf((*MockGovcClient)(nil).GroupExists), ctx, name)
}
// ImportTemplate mocks base method.
func (m *MockGovcClient) ImportTemplate(ctx context.Context, library, ovaURL, name string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImportTemplate", ctx, library, ovaURL, name)
ret0, _ := ret[0].(error)
return ret0
}
// ImportTemplate indicates an expected call of ImportTemplate.
func (mr *MockGovcClientMockRecorder) ImportTemplate(ctx, library, ovaURL, name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportTemplate", reflect.TypeOf((*MockGovcClient)(nil).ImportTemplate), ctx, library, ovaURL, name)
}
// LibraryElementExists mocks base method.
func (m *MockGovcClient) LibraryElementExists(ctx context.Context, library string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LibraryElementExists", ctx, library)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LibraryElementExists indicates an expected call of LibraryElementExists.
func (mr *MockGovcClientMockRecorder) LibraryElementExists(ctx, library interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LibraryElementExists", reflect.TypeOf((*MockGovcClient)(nil).LibraryElementExists), ctx, library)
}
// ListCategories mocks base method.
func (m *MockGovcClient) ListCategories(ctx context.Context) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListCategories", ctx)
ret0, _ := ret[0].([]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListCategories indicates an expected call of ListCategories.
func (mr *MockGovcClientMockRecorder) ListCategories(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListCategories", reflect.TypeOf((*MockGovcClient)(nil).ListCategories), ctx)
}
// ListTags mocks base method.
func (m *MockGovcClient) ListTags(ctx context.Context) ([]executables.Tag, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListTags", ctx)
ret0, _ := ret[0].([]executables.Tag)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListTags indicates an expected call of ListTags.
func (mr *MockGovcClientMockRecorder) ListTags(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTags", reflect.TypeOf((*MockGovcClient)(nil).ListTags), ctx)
}
// RoleExists mocks base method.
func (m *MockGovcClient) RoleExists(ctx context.Context, name string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RoleExists", ctx, name)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RoleExists indicates an expected call of RoleExists.
func (mr *MockGovcClientMockRecorder) RoleExists(ctx, name interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RoleExists", reflect.TypeOf((*MockGovcClient)(nil).RoleExists), ctx, name)
}
// SearchTemplate mocks base method.
func (m *MockGovcClient) SearchTemplate(ctx context.Context, datacenter, template string) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SearchTemplate", ctx, datacenter, template)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SearchTemplate indicates an expected call of SearchTemplate.
func (mr *MockGovcClientMockRecorder) SearchTemplate(ctx, datacenter, template interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchTemplate", reflect.TypeOf((*MockGovcClient)(nil).SearchTemplate), ctx, datacenter, template)
}
// SetGroupRoleOnObject mocks base method.
func (m *MockGovcClient) SetGroupRoleOnObject(ctx context.Context, principal, role, object, domain string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetGroupRoleOnObject", ctx, principal, role, object, domain)
ret0, _ := ret[0].(error)
return ret0
}
// SetGroupRoleOnObject indicates an expected call of SetGroupRoleOnObject.
func (mr *MockGovcClientMockRecorder) SetGroupRoleOnObject(ctx, principal, role, object, domain interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetGroupRoleOnObject", reflect.TypeOf((*MockGovcClient)(nil).SetGroupRoleOnObject), ctx, principal, role, object, domain)
}
// UserExists mocks base method.
func (m *MockGovcClient) UserExists(ctx context.Context, username string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UserExists", ctx, username)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UserExists indicates an expected call of UserExists.
func (mr *MockGovcClientMockRecorder) UserExists(ctx, username interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UserExists", reflect.TypeOf((*MockGovcClient)(nil).UserExists), ctx, username)
}
| 325 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/providers/vsphere (interfaces: ProviderGovcClient,ProviderKubectlClient,IPValidator,VSphereClientBuilder)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
executables "github.com/aws/eks-anywhere/pkg/executables"
govmomi "github.com/aws/eks-anywhere/pkg/govmomi"
types "github.com/aws/eks-anywhere/pkg/types"
v1beta1 "github.com/aws/etcdadm-controller/api/v1beta1"
gomock "github.com/golang/mock/gomock"
v1 "k8s.io/api/core/v1"
v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1"
v1beta11 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
)
// MockProviderGovcClient is a mock of ProviderGovcClient interface.
type MockProviderGovcClient struct {
ctrl *gomock.Controller
recorder *MockProviderGovcClientMockRecorder
}
// MockProviderGovcClientMockRecorder is the mock recorder for MockProviderGovcClient.
type MockProviderGovcClientMockRecorder struct {
mock *MockProviderGovcClient
}
// NewMockProviderGovcClient creates a new mock instance.
func NewMockProviderGovcClient(ctrl *gomock.Controller) *MockProviderGovcClient {
mock := &MockProviderGovcClient{ctrl: ctrl}
mock.recorder = &MockProviderGovcClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockProviderGovcClient) EXPECT() *MockProviderGovcClientMockRecorder {
return m.recorder
}
// AddTag mocks base method.
func (m *MockProviderGovcClient) AddTag(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddTag", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// AddTag indicates an expected call of AddTag.
func (mr *MockProviderGovcClientMockRecorder) AddTag(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTag", reflect.TypeOf((*MockProviderGovcClient)(nil).AddTag), arg0, arg1, arg2)
}
// AddUserToGroup mocks base method.
func (m *MockProviderGovcClient) AddUserToGroup(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddUserToGroup", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// AddUserToGroup indicates an expected call of AddUserToGroup.
func (mr *MockProviderGovcClientMockRecorder) AddUserToGroup(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUserToGroup", reflect.TypeOf((*MockProviderGovcClient)(nil).AddUserToGroup), arg0, arg1, arg2)
}
// ConfigureCertThumbprint mocks base method.
func (m *MockProviderGovcClient) ConfigureCertThumbprint(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ConfigureCertThumbprint", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ConfigureCertThumbprint indicates an expected call of ConfigureCertThumbprint.
func (mr *MockProviderGovcClientMockRecorder) ConfigureCertThumbprint(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigureCertThumbprint", reflect.TypeOf((*MockProviderGovcClient)(nil).ConfigureCertThumbprint), arg0, arg1, arg2)
}
// CreateCategoryForVM mocks base method.
func (m *MockProviderGovcClient) CreateCategoryForVM(arg0 context.Context, arg1 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateCategoryForVM", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CreateCategoryForVM indicates an expected call of CreateCategoryForVM.
func (mr *MockProviderGovcClientMockRecorder) CreateCategoryForVM(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateCategoryForVM", reflect.TypeOf((*MockProviderGovcClient)(nil).CreateCategoryForVM), arg0, arg1)
}
// CreateGroup mocks base method.
func (m *MockProviderGovcClient) CreateGroup(arg0 context.Context, arg1 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateGroup", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CreateGroup indicates an expected call of CreateGroup.
func (mr *MockProviderGovcClientMockRecorder) CreateGroup(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGroup", reflect.TypeOf((*MockProviderGovcClient)(nil).CreateGroup), arg0, arg1)
}
// CreateLibrary mocks base method.
func (m *MockProviderGovcClient) CreateLibrary(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateLibrary", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// CreateLibrary indicates an expected call of CreateLibrary.
func (mr *MockProviderGovcClientMockRecorder) CreateLibrary(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLibrary", reflect.TypeOf((*MockProviderGovcClient)(nil).CreateLibrary), arg0, arg1, arg2)
}
// CreateRole mocks base method.
func (m *MockProviderGovcClient) CreateRole(arg0 context.Context, arg1 string, arg2 []string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateRole", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// CreateRole indicates an expected call of CreateRole.
func (mr *MockProviderGovcClientMockRecorder) CreateRole(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRole", reflect.TypeOf((*MockProviderGovcClient)(nil).CreateRole), arg0, arg1, arg2)
}
// CreateTag mocks base method.
func (m *MockProviderGovcClient) CreateTag(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateTag", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// CreateTag indicates an expected call of CreateTag.
func (mr *MockProviderGovcClientMockRecorder) CreateTag(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTag", reflect.TypeOf((*MockProviderGovcClient)(nil).CreateTag), arg0, arg1, arg2)
}
// CreateUser mocks base method.
func (m *MockProviderGovcClient) CreateUser(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateUser", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// CreateUser indicates an expected call of CreateUser.
func (mr *MockProviderGovcClientMockRecorder) CreateUser(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateUser", reflect.TypeOf((*MockProviderGovcClient)(nil).CreateUser), arg0, arg1, arg2)
}
// DatacenterExists mocks base method.
func (m *MockProviderGovcClient) DatacenterExists(arg0 context.Context, arg1 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DatacenterExists", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DatacenterExists indicates an expected call of DatacenterExists.
func (mr *MockProviderGovcClientMockRecorder) DatacenterExists(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DatacenterExists", reflect.TypeOf((*MockProviderGovcClient)(nil).DatacenterExists), arg0, arg1)
}
// DeleteLibraryElement mocks base method.
func (m *MockProviderGovcClient) DeleteLibraryElement(arg0 context.Context, arg1 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteLibraryElement", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteLibraryElement indicates an expected call of DeleteLibraryElement.
func (mr *MockProviderGovcClientMockRecorder) DeleteLibraryElement(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLibraryElement", reflect.TypeOf((*MockProviderGovcClient)(nil).DeleteLibraryElement), arg0, arg1)
}
// DeployTemplateFromLibrary mocks base method.
func (m *MockProviderGovcClient) DeployTemplateFromLibrary(arg0 context.Context, arg1, arg2, arg3, arg4, arg5, arg6, arg7 string, arg8 bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeployTemplateFromLibrary", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
ret0, _ := ret[0].(error)
return ret0
}
// DeployTemplateFromLibrary indicates an expected call of DeployTemplateFromLibrary.
func (mr *MockProviderGovcClientMockRecorder) DeployTemplateFromLibrary(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeployTemplateFromLibrary", reflect.TypeOf((*MockProviderGovcClient)(nil).DeployTemplateFromLibrary), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
}
// GetCertThumbprint mocks base method.
func (m *MockProviderGovcClient) GetCertThumbprint(arg0 context.Context) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetCertThumbprint", arg0)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetCertThumbprint indicates an expected call of GetCertThumbprint.
func (mr *MockProviderGovcClientMockRecorder) GetCertThumbprint(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCertThumbprint", reflect.TypeOf((*MockProviderGovcClient)(nil).GetCertThumbprint), arg0)
}
// GetHardDiskSize mocks base method.
func (m *MockProviderGovcClient) GetHardDiskSize(arg0 context.Context, arg1, arg2 string) (map[string]float64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetHardDiskSize", arg0, arg1, arg2)
ret0, _ := ret[0].(map[string]float64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetHardDiskSize indicates an expected call of GetHardDiskSize.
func (mr *MockProviderGovcClientMockRecorder) GetHardDiskSize(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHardDiskSize", reflect.TypeOf((*MockProviderGovcClient)(nil).GetHardDiskSize), arg0, arg1, arg2)
}
// GetLibraryElementContentVersion mocks base method.
func (m *MockProviderGovcClient) GetLibraryElementContentVersion(arg0 context.Context, arg1 string) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetLibraryElementContentVersion", arg0, arg1)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetLibraryElementContentVersion indicates an expected call of GetLibraryElementContentVersion.
func (mr *MockProviderGovcClientMockRecorder) GetLibraryElementContentVersion(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLibraryElementContentVersion", reflect.TypeOf((*MockProviderGovcClient)(nil).GetLibraryElementContentVersion), arg0, arg1)
}
// GetTags mocks base method.
func (m *MockProviderGovcClient) GetTags(arg0 context.Context, arg1 string) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetTags", arg0, arg1)
ret0, _ := ret[0].([]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetTags indicates an expected call of GetTags.
func (mr *MockProviderGovcClientMockRecorder) GetTags(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTags", reflect.TypeOf((*MockProviderGovcClient)(nil).GetTags), arg0, arg1)
}
// GetVMDiskSizeInGB mocks base method.
func (m *MockProviderGovcClient) GetVMDiskSizeInGB(arg0 context.Context, arg1, arg2 string) (int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetVMDiskSizeInGB", arg0, arg1, arg2)
ret0, _ := ret[0].(int)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetVMDiskSizeInGB indicates an expected call of GetVMDiskSizeInGB.
func (mr *MockProviderGovcClientMockRecorder) GetVMDiskSizeInGB(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVMDiskSizeInGB", reflect.TypeOf((*MockProviderGovcClient)(nil).GetVMDiskSizeInGB), arg0, arg1, arg2)
}
// GetWorkloadAvailableSpace mocks base method.
func (m *MockProviderGovcClient) GetWorkloadAvailableSpace(arg0 context.Context, arg1 string) (float64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetWorkloadAvailableSpace", arg0, arg1)
ret0, _ := ret[0].(float64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetWorkloadAvailableSpace indicates an expected call of GetWorkloadAvailableSpace.
func (mr *MockProviderGovcClientMockRecorder) GetWorkloadAvailableSpace(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkloadAvailableSpace", reflect.TypeOf((*MockProviderGovcClient)(nil).GetWorkloadAvailableSpace), arg0, arg1)
}
// GroupExists mocks base method.
func (m *MockProviderGovcClient) GroupExists(arg0 context.Context, arg1 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GroupExists", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GroupExists indicates an expected call of GroupExists.
func (mr *MockProviderGovcClientMockRecorder) GroupExists(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GroupExists", reflect.TypeOf((*MockProviderGovcClient)(nil).GroupExists), arg0, arg1)
}
// ImportTemplate mocks base method.
func (m *MockProviderGovcClient) ImportTemplate(arg0 context.Context, arg1, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImportTemplate", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// ImportTemplate indicates an expected call of ImportTemplate.
func (mr *MockProviderGovcClientMockRecorder) ImportTemplate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportTemplate", reflect.TypeOf((*MockProviderGovcClient)(nil).ImportTemplate), arg0, arg1, arg2, arg3)
}
// IsCertSelfSigned mocks base method.
func (m *MockProviderGovcClient) IsCertSelfSigned(arg0 context.Context) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsCertSelfSigned", arg0)
ret0, _ := ret[0].(bool)
return ret0
}
// IsCertSelfSigned indicates an expected call of IsCertSelfSigned.
func (mr *MockProviderGovcClientMockRecorder) IsCertSelfSigned(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsCertSelfSigned", reflect.TypeOf((*MockProviderGovcClient)(nil).IsCertSelfSigned), arg0)
}
// LibraryElementExists mocks base method.
func (m *MockProviderGovcClient) LibraryElementExists(arg0 context.Context, arg1 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LibraryElementExists", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LibraryElementExists indicates an expected call of LibraryElementExists.
func (mr *MockProviderGovcClientMockRecorder) LibraryElementExists(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LibraryElementExists", reflect.TypeOf((*MockProviderGovcClient)(nil).LibraryElementExists), arg0, arg1)
}
// ListCategories mocks base method.
func (m *MockProviderGovcClient) ListCategories(arg0 context.Context) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListCategories", arg0)
ret0, _ := ret[0].([]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListCategories indicates an expected call of ListCategories.
func (mr *MockProviderGovcClientMockRecorder) ListCategories(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListCategories", reflect.TypeOf((*MockProviderGovcClient)(nil).ListCategories), arg0)
}
// ListTags mocks base method.
func (m *MockProviderGovcClient) ListTags(arg0 context.Context) ([]executables.Tag, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListTags", arg0)
ret0, _ := ret[0].([]executables.Tag)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListTags indicates an expected call of ListTags.
func (mr *MockProviderGovcClientMockRecorder) ListTags(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTags", reflect.TypeOf((*MockProviderGovcClient)(nil).ListTags), arg0)
}
// NetworkExists mocks base method.
func (m *MockProviderGovcClient) NetworkExists(arg0 context.Context, arg1 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetworkExists", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NetworkExists indicates an expected call of NetworkExists.
func (mr *MockProviderGovcClientMockRecorder) NetworkExists(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkExists", reflect.TypeOf((*MockProviderGovcClient)(nil).NetworkExists), arg0, arg1)
}
// RoleExists mocks base method.
func (m *MockProviderGovcClient) RoleExists(arg0 context.Context, arg1 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RoleExists", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RoleExists indicates an expected call of RoleExists.
func (mr *MockProviderGovcClientMockRecorder) RoleExists(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RoleExists", reflect.TypeOf((*MockProviderGovcClient)(nil).RoleExists), arg0, arg1)
}
// SearchTemplate mocks base method.
func (m *MockProviderGovcClient) SearchTemplate(arg0 context.Context, arg1, arg2 string) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SearchTemplate", arg0, arg1, arg2)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SearchTemplate indicates an expected call of SearchTemplate.
func (mr *MockProviderGovcClientMockRecorder) SearchTemplate(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchTemplate", reflect.TypeOf((*MockProviderGovcClient)(nil).SearchTemplate), arg0, arg1, arg2)
}
// SetGroupRoleOnObject mocks base method.
func (m *MockProviderGovcClient) SetGroupRoleOnObject(arg0 context.Context, arg1, arg2, arg3, arg4 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetGroupRoleOnObject", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// SetGroupRoleOnObject indicates an expected call of SetGroupRoleOnObject.
func (mr *MockProviderGovcClientMockRecorder) SetGroupRoleOnObject(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetGroupRoleOnObject", reflect.TypeOf((*MockProviderGovcClient)(nil).SetGroupRoleOnObject), arg0, arg1, arg2, arg3, arg4)
}
// TemplateHasSnapshot mocks base method.
func (m *MockProviderGovcClient) TemplateHasSnapshot(arg0 context.Context, arg1 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TemplateHasSnapshot", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// TemplateHasSnapshot indicates an expected call of TemplateHasSnapshot.
func (mr *MockProviderGovcClientMockRecorder) TemplateHasSnapshot(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TemplateHasSnapshot", reflect.TypeOf((*MockProviderGovcClient)(nil).TemplateHasSnapshot), arg0, arg1)
}
// UserExists mocks base method.
func (m *MockProviderGovcClient) UserExists(arg0 context.Context, arg1 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UserExists", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UserExists indicates an expected call of UserExists.
func (mr *MockProviderGovcClientMockRecorder) UserExists(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UserExists", reflect.TypeOf((*MockProviderGovcClient)(nil).UserExists), arg0, arg1)
}
// ValidateVCenterAuthentication mocks base method.
func (m *MockProviderGovcClient) ValidateVCenterAuthentication(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateVCenterAuthentication", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateVCenterAuthentication indicates an expected call of ValidateVCenterAuthentication.
func (mr *MockProviderGovcClientMockRecorder) ValidateVCenterAuthentication(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVCenterAuthentication", reflect.TypeOf((*MockProviderGovcClient)(nil).ValidateVCenterAuthentication), arg0)
}
// ValidateVCenterConnection mocks base method.
func (m *MockProviderGovcClient) ValidateVCenterConnection(arg0 context.Context, arg1 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateVCenterConnection", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateVCenterConnection indicates an expected call of ValidateVCenterConnection.
func (mr *MockProviderGovcClientMockRecorder) ValidateVCenterConnection(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVCenterConnection", reflect.TypeOf((*MockProviderGovcClient)(nil).ValidateVCenterConnection), arg0, arg1)
}
// ValidateVCenterSetupMachineConfig mocks base method.
func (m *MockProviderGovcClient) ValidateVCenterSetupMachineConfig(arg0 context.Context, arg1 *v1alpha1.VSphereDatacenterConfig, arg2 *v1alpha1.VSphereMachineConfig, arg3 *bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateVCenterSetupMachineConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateVCenterSetupMachineConfig indicates an expected call of ValidateVCenterSetupMachineConfig.
func (mr *MockProviderGovcClientMockRecorder) ValidateVCenterSetupMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVCenterSetupMachineConfig", reflect.TypeOf((*MockProviderGovcClient)(nil).ValidateVCenterSetupMachineConfig), arg0, arg1, arg2, arg3)
}
// MockProviderKubectlClient is a mock of ProviderKubectlClient interface.
type MockProviderKubectlClient struct {
ctrl *gomock.Controller
recorder *MockProviderKubectlClientMockRecorder
}
// MockProviderKubectlClientMockRecorder is the mock recorder for MockProviderKubectlClient.
type MockProviderKubectlClientMockRecorder struct {
mock *MockProviderKubectlClient
}
// NewMockProviderKubectlClient creates a new mock instance.
func NewMockProviderKubectlClient(ctrl *gomock.Controller) *MockProviderKubectlClient {
mock := &MockProviderKubectlClient{ctrl: ctrl}
mock.recorder = &MockProviderKubectlClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockProviderKubectlClient) EXPECT() *MockProviderKubectlClientMockRecorder {
return m.recorder
}
// ApplyKubeSpecFromBytes mocks base method.
func (m *MockProviderKubectlClient) ApplyKubeSpecFromBytes(arg0 context.Context, arg1 *types.Cluster, arg2 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytes", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytes indicates an expected call of ApplyKubeSpecFromBytes.
func (mr *MockProviderKubectlClientMockRecorder) ApplyKubeSpecFromBytes(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytes", reflect.TypeOf((*MockProviderKubectlClient)(nil).ApplyKubeSpecFromBytes), arg0, arg1, arg2)
}
// ApplyTolerationsFromTaintsToDaemonSet mocks base method.
func (m *MockProviderKubectlClient) ApplyTolerationsFromTaintsToDaemonSet(arg0 context.Context, arg1, arg2 []v1.Taint, arg3, arg4 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyTolerationsFromTaintsToDaemonSet", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyTolerationsFromTaintsToDaemonSet indicates an expected call of ApplyTolerationsFromTaintsToDaemonSet.
func (mr *MockProviderKubectlClientMockRecorder) ApplyTolerationsFromTaintsToDaemonSet(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyTolerationsFromTaintsToDaemonSet", reflect.TypeOf((*MockProviderKubectlClient)(nil).ApplyTolerationsFromTaintsToDaemonSet), arg0, arg1, arg2, arg3, arg4)
}
// CreateNamespaceIfNotPresent mocks base method.
func (m *MockProviderKubectlClient) CreateNamespaceIfNotPresent(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateNamespaceIfNotPresent", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// CreateNamespaceIfNotPresent indicates an expected call of CreateNamespaceIfNotPresent.
func (mr *MockProviderKubectlClientMockRecorder) CreateNamespaceIfNotPresent(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNamespaceIfNotPresent", reflect.TypeOf((*MockProviderKubectlClient)(nil).CreateNamespaceIfNotPresent), arg0, arg1, arg2)
}
// DeleteEksaDatacenterConfig mocks base method.
func (m *MockProviderKubectlClient) DeleteEksaDatacenterConfig(arg0 context.Context, arg1, arg2, arg3, arg4 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteEksaDatacenterConfig", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteEksaDatacenterConfig indicates an expected call of DeleteEksaDatacenterConfig.
func (mr *MockProviderKubectlClientMockRecorder) DeleteEksaDatacenterConfig(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEksaDatacenterConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).DeleteEksaDatacenterConfig), arg0, arg1, arg2, arg3, arg4)
}
// DeleteEksaMachineConfig mocks base method.
func (m *MockProviderKubectlClient) DeleteEksaMachineConfig(arg0 context.Context, arg1, arg2, arg3, arg4 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteEksaMachineConfig", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteEksaMachineConfig indicates an expected call of DeleteEksaMachineConfig.
func (mr *MockProviderKubectlClientMockRecorder) DeleteEksaMachineConfig(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEksaMachineConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).DeleteEksaMachineConfig), arg0, arg1, arg2, arg3, arg4)
}
// GetEksaCluster mocks base method.
func (m *MockProviderKubectlClient) GetEksaCluster(arg0 context.Context, arg1 *types.Cluster, arg2 string) (*v1alpha1.Cluster, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaCluster", arg0, arg1, arg2)
ret0, _ := ret[0].(*v1alpha1.Cluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaCluster indicates an expected call of GetEksaCluster.
func (mr *MockProviderKubectlClientMockRecorder) GetEksaCluster(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCluster", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEksaCluster), arg0, arg1, arg2)
}
// GetEksaVSphereDatacenterConfig mocks base method.
func (m *MockProviderKubectlClient) GetEksaVSphereDatacenterConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.VSphereDatacenterConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaVSphereDatacenterConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha1.VSphereDatacenterConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaVSphereDatacenterConfig indicates an expected call of GetEksaVSphereDatacenterConfig.
func (mr *MockProviderKubectlClientMockRecorder) GetEksaVSphereDatacenterConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaVSphereDatacenterConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEksaVSphereDatacenterConfig), arg0, arg1, arg2, arg3)
}
// GetEksaVSphereMachineConfig mocks base method.
func (m *MockProviderKubectlClient) GetEksaVSphereMachineConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.VSphereMachineConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaVSphereMachineConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha1.VSphereMachineConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaVSphereMachineConfig indicates an expected call of GetEksaVSphereMachineConfig.
func (mr *MockProviderKubectlClientMockRecorder) GetEksaVSphereMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaVSphereMachineConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEksaVSphereMachineConfig), arg0, arg1, arg2, arg3)
}
// GetEtcdadmCluster mocks base method.
func (m *MockProviderKubectlClient) GetEtcdadmCluster(arg0 context.Context, arg1 *types.Cluster, arg2 string, arg3 ...executables.KubectlOpt) (*v1beta1.EtcdadmCluster, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetEtcdadmCluster", varargs...)
ret0, _ := ret[0].(*v1beta1.EtcdadmCluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEtcdadmCluster indicates an expected call of GetEtcdadmCluster.
func (mr *MockProviderKubectlClientMockRecorder) GetEtcdadmCluster(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEtcdadmCluster", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEtcdadmCluster), varargs...)
}
// GetKubeadmControlPlane mocks base method.
func (m *MockProviderKubectlClient) GetKubeadmControlPlane(arg0 context.Context, arg1 *types.Cluster, arg2 string, arg3 ...executables.KubectlOpt) (*v1beta11.KubeadmControlPlane, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetKubeadmControlPlane", varargs...)
ret0, _ := ret[0].(*v1beta11.KubeadmControlPlane)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetKubeadmControlPlane indicates an expected call of GetKubeadmControlPlane.
func (mr *MockProviderKubectlClientMockRecorder) GetKubeadmControlPlane(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetKubeadmControlPlane", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetKubeadmControlPlane), varargs...)
}
// GetMachineDeployment mocks base method.
func (m *MockProviderKubectlClient) GetMachineDeployment(arg0 context.Context, arg1 string, arg2 ...executables.KubectlOpt) (*v1beta10.MachineDeployment, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetMachineDeployment", varargs...)
ret0, _ := ret[0].(*v1beta10.MachineDeployment)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetMachineDeployment indicates an expected call of GetMachineDeployment.
func (mr *MockProviderKubectlClientMockRecorder) GetMachineDeployment(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMachineDeployment", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetMachineDeployment), varargs...)
}
// GetSecretFromNamespace mocks base method.
func (m *MockProviderKubectlClient) GetSecretFromNamespace(arg0 context.Context, arg1, arg2, arg3 string) (*v1.Secret, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetSecretFromNamespace", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1.Secret)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSecretFromNamespace indicates an expected call of GetSecretFromNamespace.
func (mr *MockProviderKubectlClientMockRecorder) GetSecretFromNamespace(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecretFromNamespace", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetSecretFromNamespace), arg0, arg1, arg2, arg3)
}
// LoadSecret mocks base method.
func (m *MockProviderKubectlClient) LoadSecret(arg0 context.Context, arg1, arg2, arg3, arg4 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LoadSecret", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// LoadSecret indicates an expected call of LoadSecret.
func (mr *MockProviderKubectlClientMockRecorder) LoadSecret(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadSecret", reflect.TypeOf((*MockProviderKubectlClient)(nil).LoadSecret), arg0, arg1, arg2, arg3, arg4)
}
// RemoveAnnotationInNamespace mocks base method.
func (m *MockProviderKubectlClient) RemoveAnnotationInNamespace(arg0 context.Context, arg1, arg2, arg3 string, arg4 *types.Cluster, arg5 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RemoveAnnotationInNamespace", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(error)
return ret0
}
// RemoveAnnotationInNamespace indicates an expected call of RemoveAnnotationInNamespace.
func (mr *MockProviderKubectlClientMockRecorder) RemoveAnnotationInNamespace(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAnnotationInNamespace", reflect.TypeOf((*MockProviderKubectlClient)(nil).RemoveAnnotationInNamespace), arg0, arg1, arg2, arg3, arg4, arg5)
}
// SearchVsphereDatacenterConfig mocks base method.
func (m *MockProviderKubectlClient) SearchVsphereDatacenterConfig(arg0 context.Context, arg1, arg2, arg3 string) ([]*v1alpha1.VSphereDatacenterConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SearchVsphereDatacenterConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].([]*v1alpha1.VSphereDatacenterConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SearchVsphereDatacenterConfig indicates an expected call of SearchVsphereDatacenterConfig.
func (mr *MockProviderKubectlClientMockRecorder) SearchVsphereDatacenterConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchVsphereDatacenterConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).SearchVsphereDatacenterConfig), arg0, arg1, arg2, arg3)
}
// SearchVsphereMachineConfig mocks base method.
func (m *MockProviderKubectlClient) SearchVsphereMachineConfig(arg0 context.Context, arg1, arg2, arg3 string) ([]*v1alpha1.VSphereMachineConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SearchVsphereMachineConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].([]*v1alpha1.VSphereMachineConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SearchVsphereMachineConfig indicates an expected call of SearchVsphereMachineConfig.
func (mr *MockProviderKubectlClientMockRecorder) SearchVsphereMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchVsphereMachineConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).SearchVsphereMachineConfig), arg0, arg1, arg2, arg3)
}
// SetDaemonSetImage mocks base method.
func (m *MockProviderKubectlClient) SetDaemonSetImage(arg0 context.Context, arg1, arg2, arg3, arg4, arg5 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetDaemonSetImage", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(error)
return ret0
}
// SetDaemonSetImage indicates an expected call of SetDaemonSetImage.
func (mr *MockProviderKubectlClientMockRecorder) SetDaemonSetImage(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDaemonSetImage", reflect.TypeOf((*MockProviderKubectlClient)(nil).SetDaemonSetImage), arg0, arg1, arg2, arg3, arg4, arg5)
}
// UpdateAnnotation mocks base method.
func (m *MockProviderKubectlClient) UpdateAnnotation(arg0 context.Context, arg1, arg2 string, arg3 map[string]string, arg4 ...executables.KubectlOpt) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2, arg3}
for _, a := range arg4 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpdateAnnotation", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// UpdateAnnotation indicates an expected call of UpdateAnnotation.
func (mr *MockProviderKubectlClientMockRecorder) UpdateAnnotation(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAnnotation", reflect.TypeOf((*MockProviderKubectlClient)(nil).UpdateAnnotation), varargs...)
}
// MockIPValidator is a mock of IPValidator interface.
type MockIPValidator struct {
ctrl *gomock.Controller
recorder *MockIPValidatorMockRecorder
}
// MockIPValidatorMockRecorder is the mock recorder for MockIPValidator.
type MockIPValidatorMockRecorder struct {
mock *MockIPValidator
}
// NewMockIPValidator creates a new mock instance.
func NewMockIPValidator(ctrl *gomock.Controller) *MockIPValidator {
mock := &MockIPValidator{ctrl: ctrl}
mock.recorder = &MockIPValidatorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockIPValidator) EXPECT() *MockIPValidatorMockRecorder {
return m.recorder
}
// ValidateControlPlaneIPUniqueness mocks base method.
func (m *MockIPValidator) ValidateControlPlaneIPUniqueness(arg0 *v1alpha1.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateControlPlaneIPUniqueness", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateControlPlaneIPUniqueness indicates an expected call of ValidateControlPlaneIPUniqueness.
func (mr *MockIPValidatorMockRecorder) ValidateControlPlaneIPUniqueness(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateControlPlaneIPUniqueness", reflect.TypeOf((*MockIPValidator)(nil).ValidateControlPlaneIPUniqueness), arg0)
}
// MockVSphereClientBuilder is a mock of VSphereClientBuilder interface.
type MockVSphereClientBuilder struct {
ctrl *gomock.Controller
recorder *MockVSphereClientBuilderMockRecorder
}
// MockVSphereClientBuilderMockRecorder is the mock recorder for MockVSphereClientBuilder.
type MockVSphereClientBuilderMockRecorder struct {
mock *MockVSphereClientBuilder
}
// NewMockVSphereClientBuilder creates a new mock instance.
func NewMockVSphereClientBuilder(ctrl *gomock.Controller) *MockVSphereClientBuilder {
mock := &MockVSphereClientBuilder{ctrl: ctrl}
mock.recorder = &MockVSphereClientBuilderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockVSphereClientBuilder) EXPECT() *MockVSphereClientBuilderMockRecorder {
return m.recorder
}
// Build mocks base method.
func (m *MockVSphereClientBuilder) Build(arg0 context.Context, arg1, arg2, arg3 string, arg4 bool, arg5 string) (govmomi.VSphereClient, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Build", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(govmomi.VSphereClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Build indicates an expected call of Build.
func (mr *MockVSphereClientBuilderMockRecorder) Build(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Build", reflect.TypeOf((*MockVSphereClientBuilder)(nil).Build), arg0, arg1, arg2, arg3, arg4, arg5)
}
| 901 |
eks-anywhere | aws | Go | package reconciler_test
import (
"os"
"testing"
"github.com/aws/eks-anywhere/internal/test/envtest"
)
var env *envtest.Environment
func TestMain(m *testing.M) {
os.Exit(envtest.RunWithEnvironment(m, envtest.WithAssignment(&env)))
}
| 15 |
eks-anywhere | aws | Go | package reconciler
import (
"context"
"fmt"
"os"
"github.com/go-logr/logr"
"github.com/pkg/errors"
apiv1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
c "github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/pkg/controller/serverside"
"github.com/aws/eks-anywhere/pkg/providers/vsphere"
)
// CNIReconciler is an interface for reconciling CNI in the VSphere cluster reconciler.
type CNIReconciler interface {
Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *c.Spec) (controller.Result, error)
}
// RemoteClientRegistry is an interface that defines methods for getting Kubernetes clients for remote clusters.
type RemoteClientRegistry interface {
GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error)
}
// IPValidator is an interface that defines methods to validate the control plane IP.
type IPValidator interface {
ValidateControlPlaneIP(ctx context.Context, log logr.Logger, spec *c.Spec) (controller.Result, error)
}
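// Reconciler reconciles a VSphere EKS Anywhere cluster: it validates the provider
// configuration and applies the control plane, CNI and worker CAPI objects.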
type Reconciler struct {
client client.Client
validator *vsphere.Validator
defaulter *vsphere.Defaulter
cniReconciler CNIReconciler
remoteClientRegistry RemoteClientRegistry
ipValidator IPValidator
*serverside.ObjectApplier
}
// New returns a new VSphere cluster reconciler.
func New(client client.Client, validator *vsphere.Validator, defaulter *vsphere.Defaulter, cniReconciler CNIReconciler, remoteClientRegistry RemoteClientRegistry, ipValidator IPValidator) *Reconciler {
return &Reconciler{
client: client,
validator: validator,
defaulter: defaulter,
cniReconciler: cniReconciler,
remoteClientRegistry: remoteClientRegistry,
ipValidator: ipValidator,
ObjectApplier: serverside.NewObjectApplier(client),
}
}
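// VsphereCredentials fetches the vSphere credentials secret from the eksa-system namespace.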
func VsphereCredentials(ctx context.Context, cli client.Client) (*apiv1.Secret, error) {
secret := &apiv1.Secret{}
secretKey := client.ObjectKey{
Namespace: "eksa-system",
Name: vsphere.CredentialsObjectName,
}
if err := cli.Get(ctx, secretKey, secret); err != nil {
return nil, err
}
return secret, nil
}
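// SetupEnvVars reads the vSphere credentials secret and exports the username and password
// environment variables required by the provider's govc commands.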
func SetupEnvVars(ctx context.Context, vsphereDatacenter *anywherev1.VSphereDatacenterConfig, cli client.Client) error {
secret, err := VsphereCredentials(ctx, cli)
if err != nil {
return fmt.Errorf("failed getting vsphere credentials secret: %v", err)
}
vsphereUsername := secret.Data["username"]
vspherePassword := secret.Data["password"]
if err := os.Setenv(config.EksavSphereUsernameKey, string(vsphereUsername)); err != nil {
return fmt.Errorf("failed setting env %s: %v", config.EksavSphereUsernameKey, err)
}
if err := os.Setenv(config.EksavSpherePasswordKey, string(vspherePassword)); err != nil {
return fmt.Errorf("failed setting env %s: %v", config.EksavSpherePasswordKey, err)
}
vsphereCPUsername := secret.Data["usernameCP"]
vsphereCPPassword := secret.Data["passwordCP"]
if err := os.Setenv(config.EksavSphereCPUsernameKey, string(vsphereCPUsername)); err != nil {
return fmt.Errorf("failed setting env %s: %v", config.EksavSphereCPUsernameKey, err)
}
if err := os.Setenv(config.EksavSphereCPPasswordKey, string(vsphereCPPassword)); err != nil {
return fmt.Errorf("failed setting env %s: %v", config.EksavSphereCPPasswordKey, err)
}
if err := vsphere.SetupEnvVars(vsphereDatacenter); err != nil {
return fmt.Errorf("failed setting env vars: %v", err)
}
return nil
}
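// Reconcile reconciles the cluster to the desired state, running the provider validations
// and reconciling the control plane, CNI and workers in order.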
func (r *Reconciler) Reconcile(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
log = log.WithValues("provider", "vsphere")
clusterSpec, err := c.BuildSpec(ctx, clientutil.NewKubeClient(r.client), cluster)
if err != nil {
return controller.Result{}, err
}
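// Phases run in order; validation phases short-circuit the run by returning a non-empty result.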
return controller.NewPhaseRunner[*c.Spec]().Register(
r.ipValidator.ValidateControlPlaneIP,
r.ValidateDatacenterConfig,
r.ValidateMachineConfigs,
clusters.CleanupStatusAfterValidate,
r.ReconcileControlPlane,
r.CheckControlPlaneReady,
r.ReconcileCNI,
r.ReconcileWorkers,
).Run(ctx, log, clusterSpec)
}
// ReconcileWorkerNodes validates the cluster definition and reconciles the worker nodes
// to the desired state.
func (r *Reconciler) ReconcileWorkerNodes(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
log = log.WithValues("provider", "vsphere", "reconcile type", "workers")
clusterSpec, err := c.BuildSpec(ctx, clientutil.NewKubeClient(r.client), cluster)
if err != nil {
return controller.Result{}, err
}
return controller.NewPhaseRunner[*c.Spec]().Register(
r.ValidateDatacenterConfig,
r.ValidateMachineConfigs,
r.ReconcileWorkers,
).Run(ctx, log, clusterSpec)
}
// ValidateDatacenterConfig updates the cluster status if the VSphereDatacenter status indicates that the spec is invalid.
func (r *Reconciler) ValidateDatacenterConfig(ctx context.Context, log logr.Logger, clusterSpec *c.Spec) (controller.Result, error) {
log = log.WithValues("phase", "validateDatacenterConfig")
dataCenterConfig := clusterSpec.VSphereDatacenter
if !dataCenterConfig.Status.SpecValid {
if dataCenterConfig.Status.FailureMessage != nil {
failureMessage := fmt.Sprintf("Invalid %s VSphereDatacenterConfig: %s", dataCenterConfig.Name, *dataCenterConfig.Status.FailureMessage)
clusterSpec.Cluster.Status.FailureMessage = &failureMessage
log.Error(errors.New(*dataCenterConfig.Status.FailureMessage), "Invalid VSphereDatacenterConfig", "datacenterConfig", klog.KObj(dataCenterConfig))
} else {
log.Info("VSphereDatacenterConfig hasn't been validated yet", klog.KObj(dataCenterConfig))
}
return controller.ResultWithReturn(), nil
}
return controller.Result{}, nil
}
// ValidateMachineConfigs performs additional, context-aware validations on the machine configs.
func (r *Reconciler) ValidateMachineConfigs(ctx context.Context, log logr.Logger, clusterSpec *c.Spec) (controller.Result, error) {
log = log.WithValues("phase", "validateMachineConfigs")
datacenterConfig := clusterSpec.VSphereDatacenter
// Set up env vars for executing Govc cmd
if err := SetupEnvVars(ctx, datacenterConfig, r.client); err != nil {
log.Error(err, "Failed to set up env vars for Govc")
return controller.Result{}, err
}
vsphereClusterSpec := vsphere.NewSpec(clusterSpec)
if err := r.validator.ValidateClusterMachineConfigs(ctx, vsphereClusterSpec); err != nil {
log.Error(err, "Invalid VSphereMachineConfig")
failureMessage := err.Error()
clusterSpec.Cluster.Status.FailureMessage = &failureMessage
return controller.ResultWithReturn(), nil
}
return controller.Result{}, nil
}
// ReconcileControlPlane applies the control plane CAPI objects to the cluster.
func (r *Reconciler) ReconcileControlPlane(ctx context.Context, log logr.Logger, spec *c.Spec) (controller.Result, error) {
log = log.WithValues("phase", "reconcileControlPlane")
log.Info("Applying control plane CAPI objects")
cp, err := vsphere.ControlPlaneSpec(ctx, log, clientutil.NewKubeClient(r.client), spec)
if err != nil {
return controller.Result{}, err
}
return clusters.ReconcileControlPlane(ctx, r.client, toClientControlPlane(cp))
}
// CheckControlPlaneReady checks whether the control plane for an eks-a cluster is ready.
// It requeues with the appropriate wait time whenever the cluster is not ready yet.
func (r *Reconciler) CheckControlPlaneReady(ctx context.Context, log logr.Logger, clusterSpec *c.Spec) (controller.Result, error) {
log = log.WithValues("phase", "checkControlPlaneReady")
return clusters.CheckControlPlaneReady(ctx, r.client, log, clusterSpec.Cluster)
}
// ReconcileCNI reconciles the Cilium CNI in a cluster to the desired state defined in the cluster spec.
func (r *Reconciler) ReconcileCNI(ctx context.Context, log logr.Logger, clusterSpec *c.Spec) (controller.Result, error) {
log = log.WithValues("phase", "reconcileCNI")
client, err := r.remoteClientRegistry.GetClient(ctx, controller.CapiClusterObjectKey(clusterSpec.Cluster))
if err != nil {
return controller.Result{}, err
}
return r.cniReconciler.Reconcile(ctx, log, client, clusterSpec)
}
// ReconcileWorkers applies the worker CAPI objects to the cluster.
func (r *Reconciler) ReconcileWorkers(ctx context.Context, log logr.Logger, spec *c.Spec) (controller.Result, error) {
log = log.WithValues("phase", "reconcileWorkers")
log.Info("Applying worker CAPI objects")
w, err := vsphere.WorkersSpec(ctx, log, clientutil.NewKubeClient(r.client), spec)
if err != nil {
return controller.Result{}, err
}
return clusters.ReconcileWorkersForEKSA(ctx, log, r.client, spec.Cluster, clusters.ToWorkers(w))
}
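// toClientControlPlane converts the vSphere control plane spec into the generic clusters.ControlPlane,
// collecting the config maps, secrets and cluster resource sets as additional objects to apply.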
func toClientControlPlane(cp *vsphere.ControlPlane) *clusters.ControlPlane {
other := make([]client.Object, 0, len(cp.ConfigMaps)+len(cp.Secrets)+len(cp.ClusterResourceSets)+1)
for _, o := range cp.ClusterResourceSets {
other = append(other, o)
}
for _, o := range cp.ConfigMaps {
other = append(other, o)
}
for _, o := range cp.Secrets {
other = append(other, o)
}
return &clusters.ControlPlane{
Cluster: cp.Cluster,
ProviderCluster: cp.ProviderCluster,
KubeadmControlPlane: cp.KubeadmControlPlane,
ControlPlaneMachineTemplate: cp.ControlPlaneMachineTemplate,
EtcdCluster: cp.EtcdCluster,
EtcdMachineTemplate: cp.EtcdMachineTemplate,
Other: other,
}
}
| 249 |
eks-anywhere | aws | Go | package reconciler_test
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
vspherev1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
clusterspec "github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/govmomi"
"github.com/aws/eks-anywhere/pkg/providers/vsphere"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/mocks"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/reconciler"
vspherereconcilermocks "github.com/aws/eks-anywhere/pkg/providers/vsphere/reconciler/mocks"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
clusterNamespace = "test-namespace"
)
func TestReconcilerReconcileSuccess(t *testing.T) {
tt := newReconcilerTest(t)
// We want to check that the cluster status is cleaned up if validations pass.
tt.cluster.Status.FailureMessage = ptr.String("invalid cluster")
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = tt.cluster.Name
})
tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
tt.createAllObjs()
logger := test.NewNullLogger()
remoteClient := env.Client()
tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, tt.buildSpec()).Return(controller.Result{}, nil)
tt.govcClient.EXPECT().ValidateVCenterSetupMachineConfig(tt.ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
tt.govcClient.EXPECT().ValidateVCenterSetupMachineConfig(tt.ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
tt.govcClient.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, gomock.Any()).Return("test", nil)
tt.govcClient.EXPECT().GetTags(tt.ctx, tt.machineConfigControlPlane.Spec.Template).Return([]string{"os:ubuntu", fmt.Sprintf("eksdRelease:%s", tt.bundle.Spec.VersionsBundles[0].EksD.Name)}, nil)
tt.govcClient.EXPECT().ListTags(tt.ctx).Return([]executables.Tag{}, nil)
tt.remoteClientRegistry.EXPECT().GetClient(
tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"},
).Return(remoteClient, nil).Times(1)
tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, tt.buildSpec())
result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
tt.Expect(tt.cluster.Status.FailureMessage).To(BeNil())
}
func TestReconcilerReconcileWorkerNodesSuccess(t *testing.T) {
tt := newReconcilerTest(t)
tt.cluster.Name = "my-management-cluster"
tt.cluster.SetSelfManaged()
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = tt.cluster.Name
})
tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
tt.createAllObjs()
logger := test.NewNullLogger()
tt.govcClient.EXPECT().ValidateVCenterSetupMachineConfig(tt.ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
tt.govcClient.EXPECT().ValidateVCenterSetupMachineConfig(tt.ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
tt.govcClient.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, gomock.Any()).Return("test", nil)
tt.govcClient.EXPECT().GetTags(tt.ctx, tt.machineConfigControlPlane.Spec.Template).Return([]string{"os:ubuntu", fmt.Sprintf("eksdRelease:%s", tt.bundle.Spec.VersionsBundles[0].EksD.Name)}, nil)
tt.govcClient.EXPECT().ListTags(tt.ctx).Return([]executables.Tag{}, nil)
result, err := tt.reconciler().ReconcileWorkerNodes(tt.ctx, logger, tt.cluster)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
tt.ShouldEventuallyExist(tt.ctx,
&bootstrapv1.KubeadmConfigTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster-md-0-1",
Namespace: constants.EksaSystemNamespace,
},
},
)
tt.ShouldEventuallyExist(tt.ctx,
&vspherev1.VSphereMachineTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster-md-0-1",
Namespace: constants.EksaSystemNamespace,
},
},
)
tt.ShouldEventuallyExist(tt.ctx,
&clusterv1.MachineDeployment{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster-md-0",
Namespace: constants.EksaSystemNamespace,
},
},
)
}
func TestReconcilerFailToSetUpMachineConfigCP(t *testing.T) {
tt := newReconcilerTest(t)
logger := test.NewNullLogger()
tt.withFakeClient()
tt.govcClient.EXPECT().ValidateVCenterSetupMachineConfig(tt.ctx, tt.datacenterConfig, tt.machineConfigControlPlane, gomock.Any()).Return(fmt.Errorf("error"))
tt.govcClient.EXPECT().ValidateVCenterSetupMachineConfig(tt.ctx, tt.datacenterConfig, tt.machineConfigWorker, gomock.Any()).Return(nil).MaxTimes(1)
tt.govcClient.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, tt.machineConfigControlPlane).Return("test", nil).Times(0)
tt.govcClient.EXPECT().GetTags(tt.ctx, tt.machineConfigControlPlane.Spec.Template).Return([]string{"os:ubuntu", fmt.Sprintf("eksdRelease:%s", tt.bundle.Spec.VersionsBundles[0].EksD.Name)}, nil).Times(0)
result, err := tt.reconciler().ValidateMachineConfigs(tt.ctx, logger, tt.buildSpec())
tt.Expect(err).To(BeNil(), "error should be nil to prevent requeue")
tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation")
tt.Expect(tt.cluster.Status.FailureMessage).To(HaveValue(ContainSubstring("validating vCenter setup for VSphereMachineConfig")))
}
func TestSetupEnvVars(t *testing.T) {
tt := newReconcilerTest(t)
tt.withFakeClient()
err := reconciler.SetupEnvVars(context.Background(), tt.datacenterConfig, tt.client)
tt.Expect(os.Getenv(config.EksavSphereUsernameKey)).To(Equal("user"))
tt.Expect(os.Getenv(config.EksavSpherePasswordKey)).To(Equal("pass"))
tt.Expect(os.Getenv(config.EksavSphereCPUsernameKey)).To(Equal("userCP"))
tt.Expect(os.Getenv(config.EksavSphereCPPasswordKey)).To(Equal("passCP"))
tt.Expect(err).To(BeNil())
}
func TestReconcilerControlPlaneIsNotReady(t *testing.T) {
tt := newReconcilerTest(t)
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = tt.cluster.Name
})
capiCluster.Status.Conditions = clusterv1.Conditions{
{
Type: clusterapi.ControlPlaneReadyCondition,
Status: corev1.ConditionFalse,
LastTransitionTime: metav1.NewTime(time.Now()),
},
}
tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
tt.createAllObjs()
logger := test.NewNullLogger()
tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, tt.buildSpec()).Return(controller.Result{}, nil)
tt.govcClient.EXPECT().ValidateVCenterSetupMachineConfig(tt.ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
tt.govcClient.EXPECT().ValidateVCenterSetupMachineConfig(tt.ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
tt.govcClient.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, gomock.Any()).Return("test", nil)
tt.govcClient.EXPECT().GetTags(tt.ctx, tt.machineConfigControlPlane.Spec.Template).Return([]string{"os:ubuntu", fmt.Sprintf("eksdRelease:%s", tt.bundle.Spec.VersionsBundles[0].EksD.Name)}, nil)
tt.govcClient.EXPECT().ListTags(tt.ctx).Return([]executables.Tag{}, nil)
result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.ResultWithRequeue(30 * time.Second)))
}
func TestReconcilerReconcileWorkersSuccess(t *testing.T) {
tt := newReconcilerTest(t)
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = tt.cluster.Name
})
tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
tt.createAllObjs()
result, err := tt.reconciler().ReconcileWorkers(tt.ctx, test.NewNullLogger(), tt.buildSpec())
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
}
func TestReconcilerReconcileInvalidDatacenterConfig(t *testing.T) {
tt := newReconcilerTest(t)
logger := test.NewNullLogger()
tt.datacenterConfig.Status.SpecValid = false
m := "Something wrong"
tt.datacenterConfig.Status.FailureMessage = &m
tt.withFakeClient()
result, err := tt.reconciler().ValidateDatacenterConfig(tt.ctx, logger, tt.buildSpec())
tt.Expect(err).To(BeNil(), "error should be nil to prevent requeue")
tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation")
tt.Expect(tt.cluster.Status.FailureMessage).To(HaveValue(ContainSubstring("Something wrong")))
}
func TestReconcilerDatacenterConfigNotValidated(t *testing.T) {
tt := newReconcilerTest(t)
logger := test.NewNullLogger()
tt.datacenterConfig.Status.SpecValid = false
tt.withFakeClient()
result, err := tt.reconciler().ValidateDatacenterConfig(tt.ctx, logger, tt.buildSpec())
tt.Expect(err).To(BeNil(), "error should be nil to prevent requeue")
tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation")
tt.Expect(tt.cluster.Status.FailureMessage).To(BeNil())
}
func TestReconcileCNISuccess(t *testing.T) {
tt := newReconcilerTest(t)
tt.withFakeClient()
logger := test.NewNullLogger()
remoteClient := fake.NewClientBuilder().Build()
spec := tt.buildSpec()
tt.remoteClientRegistry.EXPECT().GetClient(
tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"},
).Return(remoteClient, nil)
tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, spec)
result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec)
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
}
func TestReconcileCNIErrorClientRegistry(t *testing.T) {
tt := newReconcilerTest(t)
tt.withFakeClient()
logger := test.NewNullLogger()
spec := tt.buildSpec()
tt.remoteClientRegistry.EXPECT().GetClient(
tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"},
).Return(nil, errors.New("building client"))
result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec)
tt.Expect(err).To(MatchError(ContainSubstring("building client")))
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
}
func TestReconcilerReconcileControlPlaneSuccess(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
result, err := tt.reconciler().ReconcileControlPlane(tt.ctx, test.NewNullLogger(), tt.buildSpec())
tt.Expect(err).NotTo(HaveOccurred())
tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
tt.Expect(result).To(Equal(controller.Result{}))
tt.ShouldEventuallyExist(tt.ctx,
&addonsv1.ClusterResourceSet{
ObjectMeta: metav1.ObjectMeta{
Name: "workload-cluster-cpi",
Namespace: "eksa-system",
},
},
)
tt.ShouldEventuallyExist(tt.ctx,
&controlplanev1.KubeadmControlPlane{
ObjectMeta: metav1.ObjectMeta{
Name: "workload-cluster",
Namespace: "eksa-system",
},
},
)
tt.ShouldEventuallyExist(tt.ctx,
&vspherev1.VSphereMachineTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: "workload-cluster-control-plane-1",
Namespace: "eksa-system",
},
},
)
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = "workload-cluster"
})
tt.ShouldEventuallyExist(tt.ctx, capiCluster)
tt.ShouldEventuallyExist(tt.ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "workload-cluster-cloud-controller-manager", Namespace: "eksa-system"}})
tt.ShouldEventuallyExist(tt.ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "workload-cluster-cloud-provider-vsphere-credentials", Namespace: "eksa-system"}})
tt.ShouldEventuallyExist(tt.ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "workload-cluster-cpi-manifests", Namespace: "eksa-system"}})
}
func TestReconcilerReconcileControlPlaneFailure(t *testing.T) {
tt := newReconcilerTest(t)
tt.createAllObjs()
spec := tt.buildSpec()
spec.Cluster.Spec.KubernetesVersion = ""
_, err := tt.reconciler().ReconcileControlPlane(tt.ctx, test.NewNullLogger(), spec)
tt.Expect(err).To(HaveOccurred())
}
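// reconcilerTest bundles the mocks, clients and EKS-A objects shared by the reconciler tests.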
type reconcilerTest struct {
t testing.TB
*WithT
*envtest.APIExpecter
ctx context.Context
cniReconciler *vspherereconcilermocks.MockCNIReconciler
govcClient *mocks.MockProviderGovcClient
validator *vsphere.Validator
defaulter *vsphere.Defaulter
remoteClientRegistry *vspherereconcilermocks.MockRemoteClientRegistry
cluster *anywherev1.Cluster
client client.Client
env *envtest.Environment
bundle *releasev1.Bundles
eksaSupportObjs []client.Object
datacenterConfig *anywherev1.VSphereDatacenterConfig
machineConfigControlPlane *anywherev1.VSphereMachineConfig
machineConfigWorker *anywherev1.VSphereMachineConfig
ipValidator *vspherereconcilermocks.MockIPValidator
}
func newReconcilerTest(t testing.TB) *reconcilerTest {
ctrl := gomock.NewController(t)
cniReconciler := vspherereconcilermocks.NewMockCNIReconciler(ctrl)
remoteClientRegistry := vspherereconcilermocks.NewMockRemoteClientRegistry(ctrl)
c := env.Client()
govcClient := mocks.NewMockProviderGovcClient(ctrl)
vcb := govmomi.NewVMOMIClientBuilder()
validator := vsphere.NewValidator(govcClient, vcb)
defaulter := vsphere.NewDefaulter(govcClient)
ipValidator := vspherereconcilermocks.NewMockIPValidator(ctrl)
bundle := test.Bundle()
managementCluster := vsphereCluster(func(c *anywherev1.Cluster) {
c.Name = "management-cluster"
c.Spec.ManagementCluster = anywherev1.ManagementCluster{
Name: c.Name,
}
c.Spec.BundlesRef = &anywherev1.BundlesRef{
Name: bundle.Name,
Namespace: bundle.Namespace,
APIVersion: bundle.APIVersion,
}
})
machineConfigCP := machineConfig(func(m *anywherev1.VSphereMachineConfig) {
m.Name = "cp-machine-config"
})
machineConfigWN := machineConfig(func(m *anywherev1.VSphereMachineConfig) {
m.Name = "worker-machine-config"
})
credentialsSecret := createSecret()
workloadClusterDatacenter := dataCenter(func(d *anywherev1.VSphereDatacenterConfig) {
d.Status.SpecValid = true
})
cluster := vsphereCluster(func(c *anywherev1.Cluster) {
c.Name = "workload-cluster"
c.Spec.ManagementCluster = anywherev1.ManagementCluster{
Name: managementCluster.Name,
}
c.Spec.BundlesRef = &anywherev1.BundlesRef{
Name: bundle.Name,
Namespace: bundle.Namespace,
APIVersion: bundle.APIVersion,
}
c.Spec.ControlPlaneConfiguration = anywherev1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &anywherev1.Endpoint{
Host: "1.1.1.1",
},
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: machineConfigCP.Name,
},
}
c.Spec.DatacenterRef = anywherev1.Ref{
Kind: anywherev1.VSphereDatacenterKind,
Name: workloadClusterDatacenter.Name,
}
c.Spec.WorkerNodeGroupConfigurations = append(c.Spec.WorkerNodeGroupConfigurations,
anywherev1.WorkerNodeGroupConfiguration{
Count: ptr.Int(1),
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: machineConfigWN.Name,
},
Name: "md-0",
Labels: nil,
},
)
})
tt := &reconcilerTest{
t: t,
WithT: NewWithT(t),
APIExpecter: envtest.NewAPIExpecter(t, c),
ctx: context.Background(),
cniReconciler: cniReconciler,
govcClient: govcClient,
validator: validator,
defaulter: defaulter,
ipValidator: ipValidator,
remoteClientRegistry: remoteClientRegistry,
client: c,
env: env,
eksaSupportObjs: []client.Object{
test.Namespace(clusterNamespace),
test.Namespace(constants.EksaSystemNamespace),
managementCluster,
workloadClusterDatacenter,
bundle,
test.EksdRelease(),
credentialsSecret,
},
bundle: bundle,
cluster: cluster,
datacenterConfig: workloadClusterDatacenter,
machineConfigControlPlane: machineConfigCP,
machineConfigWorker: machineConfigWN,
}
t.Cleanup(tt.cleanup)
return tt
}
func (tt *reconcilerTest) cleanup() {
tt.DeleteAndWait(tt.ctx, tt.allObjs()...)
tt.DeleteAllOfAndWait(tt.ctx, &bootstrapv1.KubeadmConfigTemplate{})
tt.DeleteAllOfAndWait(tt.ctx, &vspherev1.VSphereMachineTemplate{})
tt.DeleteAllOfAndWait(tt.ctx, &clusterv1.MachineDeployment{})
}
func (tt *reconcilerTest) buildSpec() *clusterspec.Spec {
tt.t.Helper()
spec, err := clusterspec.BuildSpec(tt.ctx, clientutil.NewKubeClient(tt.client), tt.cluster)
tt.Expect(err).NotTo(HaveOccurred())
return spec
}
func (tt *reconcilerTest) withFakeClient() {
tt.client = fake.NewClientBuilder().WithObjects(clientutil.ObjectsToClientObjects(tt.allObjs())...).Build()
}
func (tt *reconcilerTest) reconciler() *reconciler.Reconciler {
return reconciler.New(tt.client, tt.validator, tt.defaulter, tt.cniReconciler, tt.remoteClientRegistry, tt.ipValidator)
}
func (tt *reconcilerTest) createAllObjs() {
tt.t.Helper()
envtest.CreateObjs(tt.ctx, tt.t, tt.client, tt.allObjs()...)
}
func (tt *reconcilerTest) allObjs() []client.Object {
objs := make([]client.Object, 0, len(tt.eksaSupportObjs)+3)
objs = append(objs, tt.eksaSupportObjs...)
objs = append(objs, tt.cluster, tt.machineConfigControlPlane, tt.machineConfigWorker)
return objs
}
type clusterOpt func(*anywherev1.Cluster)
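// vsphereCluster returns a Cluster object with defaults suitable for these tests, mutated by the given options.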
func vsphereCluster(opts ...clusterOpt) *anywherev1.Cluster {
c := &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Namespace: clusterNamespace,
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "1.22",
ClusterNetwork: anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{"0.0.0.0"},
},
Services: anywherev1.Services{
CidrBlocks: []string{"0.0.0.0"},
},
},
},
}
for _, opt := range opts {
opt(c)
}
return c
}
type datacenterOpt func(config *anywherev1.VSphereDatacenterConfig)
func dataCenter(opts ...datacenterOpt) *anywherev1.VSphereDatacenterConfig {
d := &anywherev1.VSphereDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.VSphereDatacenterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "datacenter",
Namespace: clusterNamespace,
},
}
for _, opt := range opts {
opt(d)
}
return d
}
type vsphereMachineOpt func(config *anywherev1.VSphereMachineConfig)
func machineConfig(opts ...vsphereMachineOpt) *anywherev1.VSphereMachineConfig {
m := &anywherev1.VSphereMachineConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.VSphereMachineConfigKind,
APIVersion: anywherev1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Namespace: clusterNamespace,
},
Spec: anywherev1.VSphereMachineConfigSpec{
DiskGiB: 40,
Datastore: "test",
Folder: "test",
NumCPUs: 2,
MemoryMiB: 16,
OSFamily: "ubuntu",
ResourcePool: "test",
StoragePolicyName: "test",
Template: "test",
Users: []anywherev1.UserConfiguration{
{
Name: "user",
SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8ZEibIrz1AUBKDvmDiWLs9f5DnOerC4qPITiDtSOuPAsxgZbRMavBfVTxodMdAkYRYlXxK6PqNo0ve0qcOV2yvpxH1OogasMMetck6BlM/dIoo3vEY4ZoG9DuVRIf9Iry5gJKbpMDYWpx1IGZrDMOFcIM20ii2qLQQk5hfq9OqdqhToEJFixdgJt/y/zt6Koy3kix+XsnrVdAHgWAq4CZuwt1G6JUAqrpob3H8vPmL7aS+35ktf0pHBm6nYoxRhslnWMUb/7vpzWiq+fUBIm2LYqvrnm7t3fRqFx7p2sZqAm2jDNivyYXwRXkoQPR96zvGeMtuQ5BVGPpsDfVudSW21+pEXHI0GINtTbua7Ogz7wtpVywSvHraRgdFOeY9mkXPzvm2IhoqNrteck2GErwqSqb19mPz6LnHueK0u7i6WuQWJn0CUoCtyMGIrowXSviK8qgHXKrmfTWATmCkbtosnLskNdYuOw8bKxq5S4WgdQVhPps2TiMSZndjX5NTr8= ubuntu@ip-10-2-0-6"},
},
},
},
}
for _, opt := range opts {
opt(m)
}
return m
}
func createSecret() *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "eksa-system",
Name: vsphere.CredentialsObjectName,
},
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
Data: map[string][]byte{
"username": []byte("user"),
"password": []byte("pass"),
"usernameCP": []byte("userCP"),
"passwordCP": []byte("passCP"),
},
}
}
| 607 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/providers/vsphere/reconciler/reconciler.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
cluster "github.com/aws/eks-anywhere/pkg/cluster"
controller "github.com/aws/eks-anywhere/pkg/controller"
logr "github.com/go-logr/logr"
gomock "github.com/golang/mock/gomock"
client "sigs.k8s.io/controller-runtime/pkg/client"
)
// MockCNIReconciler is a mock of CNIReconciler interface.
type MockCNIReconciler struct {
ctrl *gomock.Controller
recorder *MockCNIReconcilerMockRecorder
}
// MockCNIReconcilerMockRecorder is the mock recorder for MockCNIReconciler.
type MockCNIReconcilerMockRecorder struct {
mock *MockCNIReconciler
}
// NewMockCNIReconciler creates a new mock instance.
func NewMockCNIReconciler(ctrl *gomock.Controller) *MockCNIReconciler {
mock := &MockCNIReconciler{ctrl: ctrl}
mock.recorder = &MockCNIReconcilerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCNIReconciler) EXPECT() *MockCNIReconcilerMockRecorder {
return m.recorder
}
// Reconcile mocks base method.
func (m *MockCNIReconciler) Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (controller.Result, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reconcile", ctx, logger, client, spec)
ret0, _ := ret[0].(controller.Result)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Reconcile indicates an expected call of Reconcile.
func (mr *MockCNIReconcilerMockRecorder) Reconcile(ctx, logger, client, spec interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockCNIReconciler)(nil).Reconcile), ctx, logger, client, spec)
}
// MockRemoteClientRegistry is a mock of RemoteClientRegistry interface.
type MockRemoteClientRegistry struct {
ctrl *gomock.Controller
recorder *MockRemoteClientRegistryMockRecorder
}
// MockRemoteClientRegistryMockRecorder is the mock recorder for MockRemoteClientRegistry.
type MockRemoteClientRegistryMockRecorder struct {
mock *MockRemoteClientRegistry
}
// NewMockRemoteClientRegistry creates a new mock instance.
func NewMockRemoteClientRegistry(ctrl *gomock.Controller) *MockRemoteClientRegistry {
mock := &MockRemoteClientRegistry{ctrl: ctrl}
mock.recorder = &MockRemoteClientRegistryMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockRemoteClientRegistry) EXPECT() *MockRemoteClientRegistryMockRecorder {
return m.recorder
}
// GetClient mocks base method.
func (m *MockRemoteClientRegistry) GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetClient", ctx, cluster)
ret0, _ := ret[0].(client.Client)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetClient indicates an expected call of GetClient.
func (mr *MockRemoteClientRegistryMockRecorder) GetClient(ctx, cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockRemoteClientRegistry)(nil).GetClient), ctx, cluster)
}
// MockIPValidator is a mock of IPValidator interface.
type MockIPValidator struct {
ctrl *gomock.Controller
recorder *MockIPValidatorMockRecorder
}
// MockIPValidatorMockRecorder is the mock recorder for MockIPValidator.
type MockIPValidatorMockRecorder struct {
mock *MockIPValidator
}
// NewMockIPValidator creates a new mock instance.
func NewMockIPValidator(ctrl *gomock.Controller) *MockIPValidator {
mock := &MockIPValidator{ctrl: ctrl}
mock.recorder = &MockIPValidatorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockIPValidator) EXPECT() *MockIPValidatorMockRecorder {
return m.recorder
}
// ValidateControlPlaneIP mocks base method.
func (m *MockIPValidator) ValidateControlPlaneIP(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateControlPlaneIP", ctx, log, spec)
ret0, _ := ret[0].(controller.Result)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ValidateControlPlaneIP indicates an expected call of ValidateControlPlaneIP.
func (mr *MockIPValidatorMockRecorder) ValidateControlPlaneIP(ctx, log, spec interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateControlPlaneIP", reflect.TypeOf((*MockIPValidator)(nil).ValidateControlPlaneIP), ctx, log, spec)
}
| 131 |
eks-anywhere | aws | Go | package setupuser
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/logger"
)
const (
vSphereRootPath = "/"
)
// GovcClient specifies the govc functions required to configure a vSphere user.
type GovcClient interface {
CreateUser(ctx context.Context, username string, password string) error
UserExists(ctx context.Context, username string) (bool, error)
CreateGroup(ctx context.Context, name string) error
GroupExists(ctx context.Context, name string) (bool, error)
AddUserToGroup(ctx context.Context, name string, username string) error
RoleExists(ctx context.Context, name string) (bool, error)
CreateRole(ctx context.Context, name string, privileges []string) error
SetGroupRoleOnObject(ctx context.Context, principal string, role string, object string, domain string) error
}
// SetupGOVCEnv sets the govc environment variables (GOVC_URL, GOVC_INSECURE, GOVC_DATACENTER) needed to build the govc client.
func SetupGOVCEnv(ctx context.Context, vsuc *VSphereSetupUserConfig) error {
err := os.Setenv("GOVC_URL", vsuc.Spec.Connection.Server)
if err != nil {
return err
}
err = os.Setenv("GOVC_INSECURE", strconv.FormatBool(vsuc.Spec.Connection.Insecure))
if err != nil {
return err
}
err = os.Setenv("GOVC_DATACENTER", vsuc.Spec.Datacenter)
if err != nil {
return err
}
return nil
}
// Run sets up a vSphere user with the group, roles, and permissions required to create EKS-A Kubernetes clusters.
func Run(ctx context.Context, vsuc *VSphereSetupUserConfig, govc GovcClient) error {
err := createGroup(ctx, vsuc, govc)
if err != nil {
return err
}
err = addUserToGroup(ctx, vsuc, govc)
if err != nil {
return err
}
err = createRoles(ctx, vsuc, govc)
if err != nil {
return err
}
err = associateRolesToObjects(ctx, vsuc, govc)
if err != nil {
return err
}
return nil
}
func createGroup(ctx context.Context, vsuc *VSphereSetupUserConfig, govc GovcClient) error {
exists, err := govc.GroupExists(ctx, vsuc.Spec.GroupName)
if err != nil {
return err
}
if !exists {
err = govc.CreateGroup(ctx, vsuc.Spec.GroupName)
} else {
logger.V(0).Info(fmt.Sprintf("Skipping creating group %s because it already exists", vsuc.Spec.GroupName))
}
if err != nil {
return err
}
return nil
}
func createRoles(ctx context.Context, vsuc *VSphereSetupUserConfig, govc GovcClient) error {
roles, err := getRoles(vsuc)
if err != nil {
return err
}
for _, r := range roles {
exists, err := govc.RoleExists(ctx, r.name)
if err != nil {
return err
}
if !exists {
err = govc.CreateRole(ctx, r.name, r.privs)
if err != nil {
logger.V(0).Info(fmt.Sprintf("Failed to create %s role with %v", r.name, r.privs))
return err
}
logger.V(0).Info(fmt.Sprintf("Created %s role", r.name))
} else {
logger.V(0).Info(fmt.Sprintf("Skipping creating %s role because it already exists", r.name))
}
}
return nil
}
func associateRolesToObjects(ctx context.Context, vsuc *VSphereSetupUserConfig, govc GovcClient) error {
err := setGroupRoleOnObjects(ctx, vsuc, govc, vsuc.Spec.GlobalRole, []string{vSphereRootPath})
if err != nil {
return err
}
adminRoleObjects := append(vsuc.Spec.Objects.Folders, vsuc.Spec.Objects.Templates...)
err = setGroupRoleOnObjects(ctx, vsuc, govc, vsuc.Spec.AdminRole, adminRoleObjects)
if err != nil {
return err
}
userRoleObjects := getUserRoleObjects(vsuc)
err = setGroupRoleOnObjects(ctx, vsuc, govc, vsuc.Spec.UserRole, userRoleObjects)
if err != nil {
return err
}
return nil
}
func addUserToGroup(ctx context.Context, vsuc *VSphereSetupUserConfig, govc GovcClient) error {
// associate user to group
logger.V(0).Info(fmt.Sprintf("Adding user %s to group %s", vsuc.Spec.Username, vsuc.Spec.GroupName))
err := govc.AddUserToGroup(ctx, vsuc.Spec.GroupName, vsuc.Spec.Username)
if err != nil {
return err
}
return nil
}
func setGroupRoleOnObjects(ctx context.Context, vsuc *VSphereSetupUserConfig, govc GovcClient, role string, objects []string) error {
for _, obj := range objects {
err := govc.SetGroupRoleOnObject(ctx, vsuc.Spec.GroupName, role, obj, vsuc.Spec.VSphereDomain)
if err != nil {
return err
}
logger.V(0).Info(fmt.Sprintf("Set role %s on %s for group %s", role, obj, vsuc.Spec.GroupName))
}
return nil
}
func getPrivsFromFile(privsContent string) ([]string, error) {
var requiredPrivs []string
err := json.Unmarshal([]byte(privsContent), &requiredPrivs)
if err != nil {
return nil, err
}
return requiredPrivs, nil
}
type vsphereRole struct {
name string
privs []string
}
func getRoles(vsuc *VSphereSetupUserConfig) ([]vsphereRole, error) {
globalPrivs, err := getPrivsFromFile(config.VSphereGlobalPrivsFile)
if err != nil {
return []vsphereRole{}, err
}
userPrivs, err := getPrivsFromFile(config.VSphereUserPrivsFile)
if err != nil {
return []vsphereRole{}, err
}
cloudAdminPrivs, err := getPrivsFromFile(config.VSphereAdminPrivsFile)
if err != nil {
return []vsphereRole{}, err
}
return []vsphereRole{
{
name: vsuc.Spec.GlobalRole,
privs: globalPrivs,
},
{
name: vsuc.Spec.UserRole,
privs: userPrivs,
},
{
name: vsuc.Spec.AdminRole,
privs: cloudAdminPrivs,
},
}, nil
}
func getUserRoleObjects(vsuc *VSphereSetupUserConfig) []string {
objects := append(vsuc.Spec.Objects.Networks, vsuc.Spec.Objects.Datastores...)
objects = append(objects, vsuc.Spec.Objects.ResourcePools...)
return objects
}
| 209 |
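Run is designed to be driven together with GenerateConfig, SetupGOVCEnv, and ValidateVSphereObjects from this package's config file handling. A minimal sketch of that call sequence, assuming govc is any concrete GovcClient implementation (the real govc wrapper lives elsewhere in the repo) and that the runSetup name and force flag are illustrative rather than the actual CLI wiring:

package sketch

import (
	"context"

	"github.com/aws/eks-anywhere/pkg/providers/vsphere/setupuser"
)

// runSetup sketches the intended call sequence; error handling is condensed.
func runSetup(ctx context.Context, configFile string, govc setupuser.GovcClient, force bool) error {
	cfg, err := setupuser.GenerateConfig(ctx, configFile)
	if err != nil {
		return err
	}
	// Point govc at the configured vCenter before making any API calls.
	if err := setupuser.SetupGOVCEnv(ctx, cfg); err != nil {
		return err
	}
	// Unless forced, refuse to reuse pre-existing groups or roles.
	if !force {
		if err := setupuser.ValidateVSphereObjects(ctx, cfg, govc); err != nil {
			return err
		}
	}
	// Create the group, add the user, create the roles, and bind them to objects.
	return setupuser.Run(ctx, cfg, govc)
}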
eks-anywhere | aws | Go | package setupuser_test
import (
"context"
"fmt"
"os"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/setupuser"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/setupuser/mocks"
)
type testConfig struct {
GroupExists bool
GlobalRoleExists bool
UserRoleExists bool
AdminRoleExists bool
}
func TestSetupUserRun(t *testing.T) {
defaults := testConfig{
GroupExists: false,
GlobalRoleExists: false,
UserRoleExists: false,
AdminRoleExists: false,
}
tests := []struct {
name string
filepath string
wantErr string
prepare func(context.Context, *setupuser.VSphereSetupUserConfig, *mocks.MockGovcClient, testConfig)
}{
{
name: "test setup vsphere user happy path",
filepath: "./testdata/configs/valid.yaml",
wantErr: "",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(defaults.GlobalRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.GlobalRole, gomock.Any()).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(defaults.UserRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.UserRole, gomock.Any()).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(defaults.AdminRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.AdminRole, gomock.Any()).Return(nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.GlobalRole, "/", c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Folders[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Templates[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Networks[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Datastores[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.ResourcePools[0], c.Spec.VSphereDomain)
},
},
{
name: "test setup vsphere user happy path group exists",
filepath: "./testdata/configs/valid.yaml",
wantErr: "",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(true, nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(defaults.GlobalRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.GlobalRole, gomock.Any()).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(defaults.UserRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.UserRole, gomock.Any()).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(defaults.AdminRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.AdminRole, gomock.Any()).Return(nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.GlobalRole, "/", c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Folders[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Templates[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Networks[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Datastores[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.ResourcePools[0], c.Spec.VSphereDomain)
},
},
{
name: "test GroupExists error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "failed to connect to govc",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, fmt.Errorf("failed to connect to govc"))
},
},
{
name: "test CreateGroup error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "failed to create group",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(fmt.Errorf("failed to create group"))
},
},
{
name: "test AddUserToGroup error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "failed to add user to group",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(fmt.Errorf("failed to add user to group"))
},
},
{
name: "test RoleExists GlobalRole true",
filepath: "./testdata/configs/valid.yaml",
wantErr: "",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(defaults.GlobalRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.GlobalRole, gomock.Any()).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(defaults.UserRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.UserRole, gomock.Any()).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(defaults.AdminRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.AdminRole, gomock.Any()).Return(nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.GlobalRole, "/", c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Folders[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Templates[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Networks[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Datastores[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.ResourcePools[0], c.Spec.VSphereDomain)
},
},
{
name: "test RoleExists UserRole true",
filepath: "./testdata/configs/valid.yaml",
wantErr: "",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(defaults.GlobalRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.GlobalRole, gomock.Any()).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(defaults.UserRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.UserRole, gomock.Any()).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(defaults.AdminRoleExists, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.AdminRole, gomock.Any()).Return(nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.GlobalRole, "/", c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Folders[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Templates[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Networks[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Datastores[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.ResourcePools[0], c.Spec.VSphereDomain)
},
},
{
name: "test RoleExists AdminRole true",
filepath: "./testdata/configs/valid.yaml",
wantErr: "",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(true, nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.GlobalRole, "/", c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Folders[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Templates[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Networks[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Datastores[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.ResourcePools[0], c.Spec.VSphereDomain)
},
},
{
name: "test RoleExists GlobalRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(defaults.GlobalRoleExists, fmt.Errorf("govc error"))
},
},
{
name: "test RoleExists UserRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(false, fmt.Errorf("govc error"))
},
},
{
name: "test RoleExists AdminRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(false, fmt.Errorf("govc error"))
},
},
{
name: "test createRole GlobalRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(false, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.GlobalRole, gomock.Any()).Return(fmt.Errorf("govc error"))
},
},
{
name: "test createRole UserRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(false, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.UserRole, gomock.Any()).Return(fmt.Errorf("govc error"))
},
},
{
name: "test createRole AdminRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(false, nil)
gc.EXPECT().CreateRole(ctx, c.Spec.AdminRole, gomock.Any()).Return(fmt.Errorf("govc error"))
},
},
{
name: "test SetGroupRoleOnObject GlobalRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(true, nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.GlobalRole, "/", c.Spec.VSphereDomain).Return(fmt.Errorf("govc error"))
},
},
{
name: "test SetGroupRoleOnObject AdminRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(true, nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.GlobalRole, "/", c.Spec.VSphereDomain).Return(nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Folders[0], c.Spec.VSphereDomain).Return(fmt.Errorf("govc error"))
},
},
{
name: "test SetGroupRoleOnObject UserRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(true, nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.GlobalRole, "/", c.Spec.VSphereDomain).Return(nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Folders[0], c.Spec.VSphereDomain).Return(nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Templates[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Networks[0], c.Spec.VSphereDomain).Return(fmt.Errorf("govc error"))
},
},
{
name: "test SetGroupRoleOnObject UserRole exists",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient, defaults testConfig) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(defaults.GroupExists, nil)
gc.EXPECT().CreateGroup(ctx, c.Spec.GroupName).Return(nil)
gc.EXPECT().AddUserToGroup(ctx, c.Spec.GroupName, c.Spec.Username).Return(nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(true, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(true, nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.GlobalRole, "/", c.Spec.VSphereDomain).Return(nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Folders[0], c.Spec.VSphereDomain).Return(nil)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.AdminRole, c.Spec.Objects.Templates[0], c.Spec.VSphereDomain)
gc.EXPECT().SetGroupRoleOnObject(ctx, c.Spec.GroupName, c.Spec.UserRole, c.Spec.Objects.Networks[0], c.Spec.VSphereDomain).Return(fmt.Errorf("govc error"))
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
c, err := setupuser.GenerateConfig(ctx, tt.filepath)
if err != nil {
t.Fatalf("failed to generate config from %s with %s", tt.filepath, err)
}
ctrl := gomock.NewController(t)
gc := mocks.NewMockGovcClient(ctrl)
tt.prepare(ctx, c, gc, defaults)
err = setupuser.Run(ctx, c, gc)
if tt.wantErr == "" {
g.Expect(err).To(Succeed())
g.Expect(c).ToNot(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
},
)
}
}
func TestSetupGOVCEnv(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
filepath string
wantErr string
prepare func(context.Context, *setupuser.VSphereSetupUserConfig) map[string]string
}{
{
name: "test SetupGOVCEnv happy path",
filepath: "./testdata/configs/valid.yaml",
wantErr: "",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig) map[string]string {
wantEnv := map[string]string{
"GOVC_URL": c.Spec.Connection.Server,
"GOVC_INSECURE": "false",
"GOVC_DATACENTER": c.Spec.Datacenter,
}
return wantEnv
},
},
{
name: "test SetupGOVCEnv happy path insecure=true",
filepath: "./testdata/configs/valid.yaml",
wantErr: "",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig) map[string]string {
c.Spec.Connection.Insecure = true
wantEnv := map[string]string{
"GOVC_URL": c.Spec.Connection.Server,
"GOVC_INSECURE": "true",
"GOVC_DATACENTER": c.Spec.Datacenter,
}
return wantEnv
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
c, err := setupuser.GenerateConfig(ctx, tt.filepath)
if err != nil {
t.Fatalf("failed to generate config from %s with %s", tt.filepath, err)
}
wantEnv := tt.prepare(ctx, c)
err = setupuser.SetupGOVCEnv(ctx, c)
if len(tt.wantErr) > 0 {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
for k, want := range wantEnv {
v := os.Getenv(k)
g.Expect(v).To(BeIdenticalTo(want))
}
if tt.wantErr == "" {
g.Expect(err).To(Succeed())
g.Expect(c).ToNot(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
},
)
}
}
| 444 |
eks-anywhere | aws | Go | package setupuser
import (
"context"
"fmt"
"os"
"strings"
"gopkg.in/yaml.v2"
)
const (
DefaultUsername = "eksa"
DefaultGroup = "EKSAUsers"
DefaultGlobalRole = "EKSAGlobalRole"
DefaultUserRole = "EKSAUserRole"
DefaultAdminRole = "EKSACloudAdminRole"
)
type Connection struct {
Server string `yaml:"server"`
Insecure bool `yaml:"insecure"`
}
type Objects struct {
Networks []string `yaml:"networks"`
Datastores []string `yaml:"datastores"`
ResourcePools []string `yaml:"resourcePools"`
Folders []string `yaml:"folders"`
Templates []string `yaml:"templates"`
}
type VSphereUserSpec struct {
Datacenter string `yaml:"datacenter"`
VSphereDomain string `yaml:"vSphereDomain"`
Connection Connection `yaml:"connection"`
Objects Objects `yaml:"objects"`
// Below are optional fields with defaults
Username string `yaml:"username"`
GroupName string `yaml:"group"`
GlobalRole string `yaml:"globalRole"`
UserRole string `yaml:"userRole"`
AdminRole string `yaml:"adminRole"`
}
type VSphereSetupUserConfig struct {
ApiVersion string `yaml:"apiVersion"`
Kind string `yaml:"kind"`
Spec VSphereUserSpec `yaml:"spec"`
}
func GenerateConfig(ctx context.Context, filepath string) (*VSphereSetupUserConfig, error) {
c, err := readConfig(ctx, filepath)
if err != nil {
return nil, err
}
err = validate(c)
if err != nil {
return nil, err
}
setDefaults(c)
return c, nil
}
func readConfig(ctx context.Context, filepath string) (*VSphereSetupUserConfig, error) {
file, err := os.ReadFile(filepath)
if err != nil {
return nil, fmt.Errorf("failed to read file %s, err = %v", filepath, err)
}
c := VSphereSetupUserConfig{}
if err = yaml.Unmarshal(file, &c); err != nil {
return nil, fmt.Errorf("failed to parse %s, err = %v", filepath, err)
}
return &c, nil
}
func validate(c *VSphereSetupUserConfig) error {
errs := []string{}
if c.Spec.Datacenter == "" {
errs = append(errs, "datacenter cannot be empty")
}
if c.Spec.VSphereDomain == "" {
errs = append(errs, "vSphereDomain cannot be empty")
}
if c.Spec.Connection.Server == "" {
errs = append(errs, "server cannot be empty")
}
if len(errs) > 0 {
return fmt.Errorf("validations failed: %s", strings.Join(errs[:], ","))
}
return nil
}
func setDefaults(c *VSphereSetupUserConfig) {
if c.Spec.GlobalRole == "" {
c.Spec.GlobalRole = DefaultGlobalRole
}
if c.Spec.UserRole == "" {
c.Spec.UserRole = DefaultUserRole
}
if c.Spec.AdminRole == "" {
c.Spec.AdminRole = DefaultAdminRole
}
if c.Spec.GroupName == "" {
c.Spec.GroupName = DefaultGroup
}
if c.Spec.Username == "" {
c.Spec.Username = DefaultUsername
}
}
// ValidateVSphereObjects validates that the group and roles do not already exist before configuring the user.
func ValidateVSphereObjects(ctx context.Context, c *VSphereSetupUserConfig, govc GovcClient) error {
exists, err := govc.GroupExists(ctx, c.Spec.GroupName)
if err != nil {
return err
}
if exists {
return fmt.Errorf("group %s already exists, please use force=true to ignore", c.Spec.GroupName)
}
roles := []string{c.Spec.GlobalRole, c.Spec.UserRole, c.Spec.AdminRole}
for _, r := range roles {
exists, err := govc.RoleExists(ctx, r)
if err != nil {
return err
}
if exists {
return fmt.Errorf("role %s already exists, please use force=true to ignore", r)
}
}
return nil
}
| 150 |
eks-anywhere | aws | Go | package setupuser_test
import (
"context"
"fmt"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/setupuser"
"github.com/aws/eks-anywhere/pkg/providers/vsphere/setupuser/mocks"
)
func TestGenerateConfigReadFile(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
filepath string
wantErr string
}{
{
name: "test generateconfig read file happy path",
filepath: "./testdata/configs/valid.yaml",
wantErr: "",
},
{
name: "test generateconfig read file bad yaml",
filepath: "./testdata/configs/not_yaml.yaml",
wantErr: "failed to parse ./testdata/configs/not_yaml.yaml, err = yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `this is...` ",
},
{
name: "test generateconfig read file does not exist",
filepath: "./testdata/configs/not_a_file.yaml",
wantErr: "failed to read file ./testdata/configs/not_a_file.yaml",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
c, err := setupuser.GenerateConfig(ctx, tt.filepath)
if tt.wantErr == "" {
g.Expect(err).To(Succeed())
g.Expect(c).ToNot(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
},
)
}
}
func TestGenerateConfigValidations(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
filepath string
wantErr string
}{
{
name: "test read file happy path",
filepath: "./testdata/configs/valid.yaml",
wantErr: "",
},
{
name: "test validate datacenter not empty",
filepath: "./testdata/configs/empty.yaml",
wantErr: "datacenter cannot be empty",
},
{
name: "test validate vspheredomain not empty",
filepath: "./testdata/configs/empty.yaml",
wantErr: "vSphereDomain cannot be empty",
},
{
name: "test validate connection",
filepath: "./testdata/configs/empty.yaml",
wantErr: "server cannot be empty",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
c, err := setupuser.GenerateConfig(ctx, tt.filepath)
if tt.wantErr == "" {
g.Expect(err).To(BeNil())
g.Expect(c).ToNot(BeNil())
} else {
g.Expect(err.Error()).To(ContainSubstring(tt.wantErr))
}
},
)
}
}
func TestGenerateConfigSetDefaults(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
filepath string
}{
{
name: "test populating config with defaults happy path",
filepath: "./testdata/configs/valid_minimal.yaml",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
c, err := setupuser.GenerateConfig(ctx, tt.filepath)
g.Expect(err).To(Succeed())
g.Expect(c.Spec.Username).To(Equal(setupuser.DefaultUsername))
g.Expect(c.Spec.GroupName).To(Equal(setupuser.DefaultGroup))
g.Expect(c.Spec.GlobalRole).To(Equal(setupuser.DefaultGlobalRole))
g.Expect(c.Spec.UserRole).To(Equal(setupuser.DefaultUserRole))
g.Expect(c.Spec.AdminRole).To(Equal(setupuser.DefaultAdminRole))
},
)
}
}
func TestValidateVSphereObjects(t *testing.T) {
ctx := context.Background()
tests := []struct {
name string
filepath string
wantErr string
prepare func(context.Context, *setupuser.VSphereSetupUserConfig, *mocks.MockGovcClient)
}{
{
name: "test validate happy path",
filepath: "./testdata/configs/valid.yaml",
wantErr: "",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(false, nil)
},
},
{
name: "test GroupExists error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(false, fmt.Errorf("govc error"))
},
},
{
name: "test RoleExists GlobalRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(false, fmt.Errorf("govc error"))
},
},
{
name: "test validate RoleExists UserRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(false, fmt.Errorf("govc error"))
},
},
{
name: "test validate RoleExists AdminRole error",
filepath: "./testdata/configs/valid.yaml",
wantErr: "govc error",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(false, fmt.Errorf("govc error"))
},
},
{
name: "test validate RoleExists AdminRole true",
filepath: "./testdata/configs/valid.yaml",
wantErr: "role MyExistingEKSAAdminRole already exists, please use force=true to ignore",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.GlobalRole).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.UserRole).Return(false, nil)
gc.EXPECT().RoleExists(ctx, c.Spec.AdminRole).Return(true, nil)
},
},
{
name: "test validate GroupExists true",
filepath: "./testdata/configs/valid.yaml",
wantErr: "group MyExistingGroup already exists, please use force=true to ignore",
prepare: func(ctx context.Context, c *setupuser.VSphereSetupUserConfig, gc *mocks.MockGovcClient) {
gc.EXPECT().GroupExists(ctx, c.Spec.GroupName).Return(true, nil)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
c, err := setupuser.GenerateConfig(ctx, tt.filepath)
if err != nil {
t.Fatalf("failed to generate config from %s with %s", tt.filepath, err)
}
ctrl := gomock.NewController(t)
gc := mocks.NewMockGovcClient(ctrl)
tt.prepare(ctx, c, gc)
err = setupuser.ValidateVSphereObjects(ctx, c, gc)
if tt.wantErr == "" {
g.Expect(err).To(Succeed())
g.Expect(c).ToNot(BeNil())
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
},
)
}
}
| 237 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/providers/vsphere/setupuser (interfaces: GovcClient)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockGovcClient is a mock of GovcClient interface.
type MockGovcClient struct {
ctrl *gomock.Controller
recorder *MockGovcClientMockRecorder
}
// MockGovcClientMockRecorder is the mock recorder for MockGovcClient.
type MockGovcClientMockRecorder struct {
mock *MockGovcClient
}
// NewMockGovcClient creates a new mock instance.
func NewMockGovcClient(ctrl *gomock.Controller) *MockGovcClient {
mock := &MockGovcClient{ctrl: ctrl}
mock.recorder = &MockGovcClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGovcClient) EXPECT() *MockGovcClientMockRecorder {
return m.recorder
}
// AddUserToGroup mocks base method.
func (m *MockGovcClient) AddUserToGroup(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddUserToGroup", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// AddUserToGroup indicates an expected call of AddUserToGroup.
func (mr *MockGovcClientMockRecorder) AddUserToGroup(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUserToGroup", reflect.TypeOf((*MockGovcClient)(nil).AddUserToGroup), arg0, arg1, arg2)
}
// CreateGroup mocks base method.
func (m *MockGovcClient) CreateGroup(arg0 context.Context, arg1 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateGroup", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// CreateGroup indicates an expected call of CreateGroup.
func (mr *MockGovcClientMockRecorder) CreateGroup(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateGroup", reflect.TypeOf((*MockGovcClient)(nil).CreateGroup), arg0, arg1)
}
// CreateRole mocks base method.
func (m *MockGovcClient) CreateRole(arg0 context.Context, arg1 string, arg2 []string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateRole", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// CreateRole indicates an expected call of CreateRole.
func (mr *MockGovcClientMockRecorder) CreateRole(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRole", reflect.TypeOf((*MockGovcClient)(nil).CreateRole), arg0, arg1, arg2)
}
// CreateUser mocks base method.
func (m *MockGovcClient) CreateUser(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateUser", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// CreateUser indicates an expected call of CreateUser.
func (mr *MockGovcClientMockRecorder) CreateUser(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateUser", reflect.TypeOf((*MockGovcClient)(nil).CreateUser), arg0, arg1, arg2)
}
// GroupExists mocks base method.
func (m *MockGovcClient) GroupExists(arg0 context.Context, arg1 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GroupExists", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GroupExists indicates an expected call of GroupExists.
func (mr *MockGovcClientMockRecorder) GroupExists(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GroupExists", reflect.TypeOf((*MockGovcClient)(nil).GroupExists), arg0, arg1)
}
// RoleExists mocks base method.
func (m *MockGovcClient) RoleExists(arg0 context.Context, arg1 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RoleExists", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RoleExists indicates an expected call of RoleExists.
func (mr *MockGovcClientMockRecorder) RoleExists(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RoleExists", reflect.TypeOf((*MockGovcClient)(nil).RoleExists), arg0, arg1)
}
// SetGroupRoleOnObject mocks base method.
func (m *MockGovcClient) SetGroupRoleOnObject(arg0 context.Context, arg1, arg2, arg3, arg4 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetGroupRoleOnObject", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// SetGroupRoleOnObject indicates an expected call of SetGroupRoleOnObject.
func (mr *MockGovcClientMockRecorder) SetGroupRoleOnObject(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetGroupRoleOnObject", reflect.TypeOf((*MockGovcClient)(nil).SetGroupRoleOnObject), arg0, arg1, arg2, arg3, arg4)
}
// UserExists mocks base method.
func (m *MockGovcClient) UserExists(arg0 context.Context, arg1 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UserExists", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UserExists indicates an expected call of UserExists.
func (mr *MockGovcClientMockRecorder) UserExists(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UserExists", reflect.TypeOf((*MockGovcClient)(nil).UserExists), arg0, arg1)
}
| 151 |
eks-anywhere | aws | Go | package registry
import releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
// Artifact describes an OCI artifact by registry, repository, tag, and digest.
type Artifact struct {
Registry string
Repository string
Tag string
Digest string
}
// NewArtifact creates a new artifact object.
func NewArtifact(registry, repository, tag, digest string) Artifact {
return Artifact{
Registry: registry,
Repository: repository,
Tag: tag,
Digest: digest,
}
}
// NewArtifactFromURI creates a new artifact object from a URI.
func NewArtifactFromURI(uri string) Artifact {
image := releasev1.Image{
URI: uri,
}
return Artifact{
Registry: image.Registry(),
Repository: image.Repository(),
Tag: image.Tag(),
Digest: image.Digest(),
}
}
// Version returns the digest (prefixed with "@") when set, otherwise the tag (prefixed with ":").
func (art *Artifact) Version() string {
if art.Digest != "" {
return "@" + art.Digest
}
return ":" + art.Tag
}
// VersionedImage returns full URI for image.
func (art *Artifact) VersionedImage() string {
version := art.Version()
return art.Registry + "/" + art.Repository + version
}
| 49 |
eks-anywhere | aws | Go | package registry_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/eks-anywhere/pkg/registry"
)
func TestArtifact_VersionTag(t *testing.T) {
artifact := registry.NewArtifact("localhost:8443", "owner/repo", "latest", "")
assert.Equal(t, ":latest", artifact.Version())
assert.Equal(t, "localhost:8443/owner/repo:latest", artifact.VersionedImage())
}
func TestArtifact_VersionDigest(t *testing.T) {
artifact := registry.NewArtifact("localhost:8443", "owner/repo", "", "sha256:0db6a")
assert.Equal(t, "@sha256:0db6a", artifact.Version())
assert.Equal(t, "localhost:8443/owner/repo@sha256:0db6a", artifact.VersionedImage())
}
| 22 |
eks-anywhere | aws | Go | package registry
// Cache caches storage clients for OCI registries, keyed by registry host.
type Cache struct {
registries map[string]StorageClient
}
// NewCache creates an empty cache of OCI registry clients.
func NewCache() *Cache {
return &Cache{
registries: make(map[string]StorageClient),
}
}
// Get returns the cached registry client for the context's host, creating and initializing it if needed.
func (cache *Cache) Get(context StorageContext) (StorageClient, error) {
aClient, found := cache.registries[context.host]
if !found {
aClient = NewOCIRegistry(context)
err := aClient.Init()
if err != nil {
return nil, err
}
cache.registries[context.host] = aClient
}
return aClient, nil
}
// Set a client in the cache.
func (cache *Cache) Set(registryName string, client StorageClient) {
cache.registries[registryName] = client
}
| 33 |
eks-anywhere | aws | Go | package registry_test
import (
"crypto/x509"
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/eks-anywhere/pkg/registry"
)
func TestCache_Get(t *testing.T) {
cache := registry.NewCache()
credentialStore := registry.NewCredentialStore()
certificates := &x509.CertPool{}
registryContext := registry.NewStorageContext("localhost", credentialStore, certificates, false)
result, err := cache.Get(registryContext)
assert.NoError(t, err)
ociRegistry, ok := result.(*registry.OCIRegistryClient)
assert.True(t, ok)
assert.Equal(t, "localhost", ociRegistry.GetHost())
registryContext = registry.NewStorageContext("!@#$", credentialStore, certificates, true)
result, err = cache.Get(registryContext)
assert.EqualError(t, err, "error with registry <!@#$>: invalid reference: invalid registry")
busted, ok := result.(*registry.OCIRegistryClient)
assert.False(t, ok)
assert.Nil(t, busted)
artifact := registry.NewArtifactFromURI("localhost/owner/name:latest")
assert.Equal(t, "localhost", artifact.Registry)
cache.Set("localhost", result)
}
| 36 |
eks-anywhere | aws | Go | package registry
import (
"crypto/x509"
"fmt"
"os"
"path/filepath"
)
// GetCertificates reads X.509 certificates from the given PEM file into a certificate pool; an empty path returns a nil pool.
func GetCertificates(certFile string) (certificates *x509.CertPool, err error) {
if len(certFile) < 1 {
return nil, nil
}
fileContents, err := os.ReadFile(filepath.Clean(certFile))
if err != nil {
return nil, fmt.Errorf("error reading certificate file <%s>: %v", certFile, err)
}
certPool := x509.NewCertPool()
certPool.AppendCertsFromPEM(fileContents)
return certPool, nil
}
| 24 |
eks-anywhere | aws | Go | package registry_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/eks-anywhere/pkg/registry"
)
func TestGetCertificatesSuccess(t *testing.T) {
result, err := registry.GetCertificates("testdata/harbor.eksa.demo.crt")
assert.NotNil(t, result)
assert.NoError(t, err)
}
func TestGetCertificatesNothing(t *testing.T) {
result, err := registry.GetCertificates("")
assert.Nil(t, result)
assert.NoError(t, err)
}
func TestGetCertificatesError(t *testing.T) {
result, err := registry.GetCertificates("bogus.crt")
assert.Nil(t, result)
assert.EqualError(t, err, "error reading certificate file <bogus.crt>: open bogus.crt: no such file or directory")
}
| 28 |
eks-anywhere | aws | Go | package registry
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"path"
"sync"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"oras.land/oras-go/v2"
"oras.land/oras-go/v2/content"
orasregistry "oras.land/oras-go/v2/registry"
"oras.land/oras-go/v2/registry/remote"
"oras.land/oras-go/v2/registry/remote/auth"
)
// OCIRegistryClient storage client for an OCI registry.
type OCIRegistryClient struct {
StorageContext
initialized sync.Once
registry *remote.Registry
}
var _ StorageClient = (*OCIRegistryClient)(nil)
// NewOCIRegistry creates an OCI registry client.
func NewOCIRegistry(context StorageContext) *OCIRegistryClient {
return &OCIRegistryClient{
StorageContext: context,
}
}
// Init registry configuration.
func (or *OCIRegistryClient) Init() error {
var err error
onceFunc := func() {
or.registry, err = remote.NewRegistry(or.host)
if err != nil {
err = fmt.Errorf("error with registry <%s>: %v", or.host, err)
return
}
transport := http.DefaultTransport.(*http.Transport).Clone()
{ // #nosec G402
transport.TLSClientConfig = &tls.Config{
RootCAs: or.certificates,
InsecureSkipVerify: or.insecure,
}
}
authClient := &auth.Client{
Client: &http.Client{
Transport: transport,
},
Cache: auth.NewCache(),
}
authClient.SetUserAgent("eksa")
authClient.Credential = func(ctx context.Context, s string) (auth.Credential, error) {
return or.credentialStore.Credential(s)
}
or.registry.Client = authClient
}
or.initialized.Do(onceFunc)
return err
}
// GetHost returns the registry host.
func (or *OCIRegistryClient) GetHost() string {
return or.host
}
// SetProject sets the project prefix used for registry destinations.
func (or *OCIRegistryClient) SetProject(project string) {
or.project = project
}
// Destination returns the full destination reference for the given artifact in this registry.
func (or *OCIRegistryClient) Destination(image Artifact) string {
return path.Join(or.host, or.project, image.Repository) + image.Version()
}
// GetStorage returns a repository handle based on the artifact's repository.
func (or *OCIRegistryClient) GetStorage(ctx context.Context, artifact Artifact) (repo orasregistry.Repository, err error) {
dstRepo := path.Join(or.project, artifact.Repository)
repo, err = or.registry.Repository(ctx, dstRepo)
if err != nil {
return nil, fmt.Errorf("error creating repository %s: %v", dstRepo, err)
}
return repo, nil
}
// Resolve the location of the source repository given the image.
func (or *OCIRegistryClient) Resolve(ctx context.Context, srcStorage orasregistry.Repository, versionedImage string) (desc ocispec.Descriptor, err error) {
or.registry.Reference.Reference = versionedImage
return srcStorage.Resolve(ctx, or.registry.Reference.Reference)
}
// FetchBytes fetches the bytes of a resource from the registry.
func (or *OCIRegistryClient) FetchBytes(ctx context.Context, srcStorage orasregistry.Repository, artifact Artifact) (ocispec.Descriptor, []byte, error) {
return oras.FetchBytes(ctx, srcStorage, artifact.VersionedImage(), oras.DefaultFetchBytesOptions)
}
// FetchBlob fetches the blob described by the given descriptor.
func (or *OCIRegistryClient) FetchBlob(ctx context.Context, srcStorage orasregistry.Repository, descriptor ocispec.Descriptor) ([]byte, error) {
return content.FetchAll(ctx, srcStorage, descriptor)
}
// CopyGraph copies the manifest and all blobs from the source to the destination.
func (or *OCIRegistryClient) CopyGraph(ctx context.Context, srcStorage orasregistry.Repository, srcRef string, dstStorage orasregistry.Repository, dstRef string) (ocispec.Descriptor, error) {
return oras.Copy(ctx, srcStorage, srcRef, dstStorage, dstRef, oras.CopyOptions{})
}
// Tag an image.
func (or *OCIRegistryClient) Tag(ctx context.Context, dstStorage orasregistry.Repository, desc ocispec.Descriptor, tag string) error {
return dstStorage.Tag(ctx, desc, tag)
}
| 118 |
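The client above is typically used by initializing it, obtaining a repository handle, and then fetching content through that handle. A minimal sketch, assuming the caller already has a StorageContext (built with NewStorageContext) and an Artifact; the fetchManifest name is illustrative:

package sketch

import (
	"context"

	"github.com/aws/eks-anywhere/pkg/registry"
)

// fetchManifest initializes a registry client and returns the raw manifest bytes
// for the given artifact; error handling is condensed.
func fetchManifest(ctx context.Context, sc registry.StorageContext, art registry.Artifact) ([]byte, error) {
	client := registry.NewOCIRegistry(sc)
	if err := client.Init(); err != nil {
		return nil, err
	}
	repo, err := client.GetStorage(ctx, art)
	if err != nil {
		return nil, err
	}
	// FetchBytes resolves the versioned reference and returns the manifest descriptor and bytes.
	_, manifest, err := client.FetchBytes(ctx, repo, art)
	if err != nil {
		return nil, err
	}
	return manifest, nil
}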
eks-anywhere | aws | Go | package registry_test
import (
"context"
"crypto/x509"
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/eks-anywhere/pkg/registry"
)
var (
ctx = context.Background()
image = registry.Artifact{
Registry: "public.ecr.aws",
Repository: "eks-anywhere/eks-anywhere-packages",
Digest: "sha256:6efe21500abbfbb6b3e37b80dd5dea0b11a0d1b145e84298fee5d7784a77e967",
Tag: "0.2.22-eks-a-24",
}
credentialStore = registry.NewCredentialStore()
certificates = &x509.CertPool{}
registryContext = registry.NewStorageContext("localhost", credentialStore, certificates, false)
)
func TestOCIRegistryClient_Init(t *testing.T) {
sut := registry.NewOCIRegistry(registryContext)
err := sut.Init()
assert.NoError(t, err)
// Does not reinitialize
err = sut.Init()
assert.NoError(t, err)
}
func TestOCIRegistryClient_Destination(t *testing.T) {
sut := registry.NewOCIRegistry(registryContext)
destination := sut.Destination(image)
assert.Equal(t, "localhost/eks-anywhere/eks-anywhere-packages@sha256:6efe21500abbfbb6b3e37b80dd5dea0b11a0d1b145e84298fee5d7784a77e967", destination)
sut.SetProject("project/")
destination = sut.Destination(image)
assert.Equal(t, "localhost/project/eks-anywhere/eks-anywhere-packages@sha256:6efe21500abbfbb6b3e37b80dd5dea0b11a0d1b145e84298fee5d7784a77e967", destination)
}
func TestOCIRegistryClient_GetStorage(t *testing.T) {
sut := registry.NewOCIRegistry(registryContext)
assert.NoError(t, sut.Init())
_, err := sut.GetStorage(context.Background(), image)
assert.NoError(t, err)
bogusImage := registry.Artifact{
Registry: "localhost",
Repository: "!@#$",
Digest: "sha256:6efe21500abbfbb6b3e37b80dd5dea0b11a0d1b145e84298fee5d7784a77e967",
}
_, err = sut.GetStorage(context.Background(), bogusImage)
assert.EqualError(t, err, "error creating repository !@#$: invalid reference: invalid repository")
}
| 60 |
eks-anywhere | aws | Go | package registry
import (
"context"
"fmt"
)
// Copy an image from a source to a destination.
func Copy(ctx context.Context, srcClient StorageClient, dstClient StorageClient, image Artifact) (err error) {
srcStorage, err := srcClient.GetStorage(ctx, image)
if err != nil {
return fmt.Errorf("repository source: %v", err)
}
dstStorage, err := dstClient.GetStorage(ctx, image)
if err != nil {
return fmt.Errorf("repository destination: %v", err)
}
desc, err := srcClient.CopyGraph(ctx, srcStorage, image.VersionedImage(), dstStorage, dstClient.Destination(image))
if err != nil {
return fmt.Errorf("registry copy: %v", err)
}
if len(image.Tag) > 0 {
err = dstClient.Tag(ctx, dstStorage, desc, image.Tag)
if err != nil {
return fmt.Errorf("image tag: %v", err)
}
}
return nil
}
| 33 |
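Copy is the piece that ties the cache, storage context, and artifact helpers above together. A minimal end-to-end sketch, assuming the registry hosts and artifact URI are illustrative and that credential resolution may need additional setup (for example a docker config) beyond what is shown here:

package sketch

import (
	"context"
	"log"

	"github.com/aws/eks-anywhere/pkg/registry"
)

// copyArtifact mirrors an artifact from a source registry to a destination registry.
func copyArtifact() {
	ctx := context.Background()

	// Empty path: GetCertificates returns a nil pool, falling back to the system roots.
	certs, err := registry.GetCertificates("")
	if err != nil {
		log.Fatal(err)
	}
	creds := registry.NewCredentialStore()
	cache := registry.NewCache()

	srcClient, err := cache.Get(registry.NewStorageContext("public.ecr.aws", creds, certs, false))
	if err != nil {
		log.Fatal(err)
	}
	dstClient, err := cache.Get(registry.NewStorageContext("registry.internal.example", creds, certs, false))
	if err != nil {
		log.Fatal(err)
	}

	artifact := registry.NewArtifactFromURI("public.ecr.aws/eks-anywhere/eks-anywhere-packages:0.2.22-eks-a-24")
	if err := registry.Copy(ctx, srcClient, dstClient, artifact); err != nil {
		log.Fatal(err)
	}
}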
eks-anywhere | aws | Go | package registry_test
import (
"fmt"
"testing"
"github.com/golang/mock/gomock"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
"github.com/aws/eks-anywhere/pkg/registry"
"github.com/aws/eks-anywhere/pkg/registry/mocks"
)
var srcArtifact = registry.Artifact{
Registry: "public.ecr.aws",
Repository: "l0g8r8j6/kube-vip/kube-vip",
Tag: "v0.5.5-eks-a-v0.0.0-dev-build.4452",
Digest: "sha256:6efe21500abbfbb6b3e37b80dd5dea0b11a0d1b145e84298fee5d7784a77e967",
}
var expectedSrcRef = srcArtifact.VersionedImage()
var desc = ocispec.Descriptor{
URLs: []string{expectedSrcRef},
}
func TestCopy(t *testing.T) {
srcClient := mocks.NewMockStorageClient(gomock.NewController(t))
dstClient := mocks.NewMockStorageClient(gomock.NewController(t))
mockSrcRepo := *mocks.NewMockRepository(gomock.NewController(t))
mockDstRepo := *mocks.NewMockRepository(gomock.NewController(t))
srcClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockSrcRepo, nil)
dstClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockDstRepo, nil)
dstClient.EXPECT().Destination(srcArtifact).Return(expectedSrcRef)
srcClient.EXPECT().CopyGraph(ctx, &mockSrcRepo, expectedSrcRef, &mockDstRepo, expectedSrcRef).Return(desc, nil)
dstClient.EXPECT().Tag(ctx, &mockDstRepo, desc, srcArtifact.Tag).Return(nil)
err := registry.Copy(ctx, srcClient, dstClient, srcArtifact)
assert.NoError(t, err)
}
func TestCopyTagError(t *testing.T) {
srcClient := mocks.NewMockStorageClient(gomock.NewController(t))
dstClient := mocks.NewMockStorageClient(gomock.NewController(t))
mockSrcRepo := *mocks.NewMockRepository(gomock.NewController(t))
mockDstRepo := *mocks.NewMockRepository(gomock.NewController(t))
srcClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockSrcRepo, nil)
dstClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockDstRepo, nil)
dstClient.EXPECT().Destination(srcArtifact).Return(expectedSrcRef)
srcClient.EXPECT().CopyGraph(ctx, &mockSrcRepo, expectedSrcRef, &mockDstRepo, expectedSrcRef).Return(desc, nil)
dstClient.EXPECT().Tag(ctx, &mockDstRepo, desc, srcArtifact.Tag).Return(fmt.Errorf("oops"))
err := registry.Copy(ctx, srcClient, dstClient, srcArtifact)
assert.EqualError(t, err, "image tag: oops")
}
func TestCopyCopyGraphError(t *testing.T) {
srcClient := mocks.NewMockStorageClient(gomock.NewController(t))
dstClient := mocks.NewMockStorageClient(gomock.NewController(t))
mockSrcRepo := *mocks.NewMockRepository(gomock.NewController(t))
mockDstRepo := *mocks.NewMockRepository(gomock.NewController(t))
srcClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockSrcRepo, nil)
dstClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockDstRepo, nil)
dstClient.EXPECT().Destination(srcArtifact).Return(srcArtifact.VersionedImage())
expectedSrcRef := srcArtifact.VersionedImage()
srcClient.EXPECT().CopyGraph(ctx, &mockSrcRepo, expectedSrcRef, &mockDstRepo, expectedSrcRef).Return(desc, fmt.Errorf("oops"))
err := registry.Copy(ctx, srcClient, dstClient, srcArtifact)
assert.EqualError(t, err, "registry copy: oops")
}
func TestCopyDstGetStorageError(t *testing.T) {
srcClient := mocks.NewMockStorageClient(gomock.NewController(t))
dstClient := mocks.NewMockStorageClient(gomock.NewController(t))
mockSrcRepo := *mocks.NewMockRepository(gomock.NewController(t))
mockDstRepo := *mocks.NewMockRepository(gomock.NewController(t))
srcClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockSrcRepo, nil)
dstClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockDstRepo, fmt.Errorf("oops"))
err := registry.Copy(ctx, srcClient, dstClient, srcArtifact)
assert.EqualError(t, err, "repository destination: oops")
}
func TestCopySrcGetStorageError(t *testing.T) {
srcClient := mocks.NewMockStorageClient(gomock.NewController(t))
dstClient := mocks.NewMockStorageClient(gomock.NewController(t))
mockSrcRepo := *mocks.NewMockRepository(gomock.NewController(t))
srcClient.EXPECT().GetStorage(ctx, srcArtifact).Return(&mockSrcRepo, fmt.Errorf("oops"))
err := registry.Copy(ctx, srcClient, dstClient, srcArtifact)
assert.EqualError(t, err, "repository source: oops")
}
| 104 |