diff --git a/.archive/unmanaged_CAPI_dedicated_host.go b/.archive/unmanaged_CAPI_dedicated_host.go new file mode 100644 index 0000000000..e1d3cd7444 --- /dev/null +++ b/.archive/unmanaged_CAPI_dedicated_host.go @@ -0,0 +1,113 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unmanaged + +import ( + "context" + + "github.com/gofrs/flock" + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" +) + +// setupNamespace initializes the namespace for the test. +func setupNamespace(ctx context.Context, e2eCtx *shared.E2EContext) *corev1.Namespace { + Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil") + return shared.SetupSpecNamespace(ctx, "capa-dedicate-host", e2eCtx) +} + +// setupRequiredResources allocates the required resources for the test. 
+func setupRequiredResources(e2eCtx *shared.E2EContext) *shared.TestResource { + requiredResources := &shared.TestResource{ + EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, + IGW: 1, + NGW: 1, + VPC: 1, + ClassicLB: 1, + EIP: 3, + EventBridgeRules: 50, + } + requiredResources.WriteRequestedResources(e2eCtx, "capa-dedicated-hosts-test") + + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + return requiredResources +} + +// releaseResources releases the resources allocated for the test. +func releaseResources(requiredResources *shared.TestResource, e2eCtx *shared.E2EContext) { + shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) +} + +// runQuickStartSpec executes the QuickStartSpec test. +func runQuickStartSpec(e2eCtx *shared.E2EContext) { + capi_e2e.QuickStartSpec(context.TODO(), func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + ArtifactFolder: e2eCtx.Settings.ArtifactFolder, + SkipCleanup: e2eCtx.Settings.SkipCleanup, + } + }) +} + +// cleanupNamespace cleans up the namespace and dumps resources. 
+func cleanupNamespace(ctx context.Context, namespace *corev1.Namespace, e2eCtx *shared.E2EContext) { + shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) +} + +var _ = ginkgo.Context("[unmanaged] [dedicated-host]", func() { + var ( + namespace *corev1.Namespace + ctx context.Context + requiredResources *shared.TestResource + dedicatedHostID string + ) + + ginkgo.BeforeEach(func() { + ctx = context.TODO() + namespace = setupNamespace(ctx, e2eCtx) + dedicatedHostID, _ = shared.GetDedicatedHost(e2eCtx) + }) + + ginkgo.Describe("Running the dedicated-hosts spec", func() { + ginkgo.BeforeEach(func() { + requiredResources = setupRequiredResources(e2eCtx) + // e2eCtx.Settings.DedicatedHostID = dedicatedHostID + }) + + ginkgo.It("should run the QuickStartSpec", func() { + runQuickStartSpec(e2eCtx) + }) + + ginkgo.AfterEach(func() { + shared.DeleteDedicatedHost(e2eCtx, dedicatedHostID) + releaseResources(requiredResources, e2eCtx) + }) + }) + + ginkgo.AfterEach(func() { + cleanupNamespace(ctx, namespace, e2eCtx) + }) +}) diff --git a/.gitignore b/.gitignore index ada3a863fa..4919a99f3f 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,7 @@ envfile # kubeconfigs kind.kubeconfig minikube.kubeconfig +capi-test.kubeconfig kubeconfig !kubeconfig/ @@ -63,3 +64,5 @@ dist _artifacts awsiamconfiguration.yaml cloudformation.yaml +test-cluster.yaml +__debug* diff --git a/api/v1beta1/awscluster_conversion.go b/api/v1beta1/awscluster_conversion.go index 8c8e38a297..fef74ffa96 100644 --- a/api/v1beta1/awscluster_conversion.go +++ b/api/v1beta1/awscluster_conversion.go @@ -62,6 +62,8 @@ func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error { dst.Status.Bastion.NetworkInterfaceType = restored.Status.Bastion.NetworkInterfaceType dst.Status.Bastion.CapacityReservationID = restored.Status.Bastion.CapacityReservationID dst.Status.Bastion.MarketType = restored.Status.Bastion.MarketType + dst.Status.Bastion.HostAffinity = restored.Status.Bastion.HostAffinity + 
dst.Status.Bastion.HostID = restored.Status.Bastion.HostID } dst.Spec.Partition = restored.Spec.Partition diff --git a/api/v1beta1/awsmachine_conversion.go b/api/v1beta1/awsmachine_conversion.go index c5ac50ade1..87a9b67c75 100644 --- a/api/v1beta1/awsmachine_conversion.go +++ b/api/v1beta1/awsmachine_conversion.go @@ -56,6 +56,9 @@ func (src *AWSMachine) ConvertTo(dstRaw conversion.Hub) error { } } + dst.Spec.HostAffinity = restored.Spec.HostAffinity + dst.Spec.HostID = restored.Spec.HostID + return nil } @@ -119,6 +122,8 @@ func (r *AWSMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Template.Spec.ElasticIPPool.PublicIpv4PoolFallBackOrder = restored.Spec.Template.Spec.ElasticIPPool.PublicIpv4PoolFallBackOrder } } + dst.Spec.Template.Spec.HostAffinity = restored.Spec.Template.Spec.HostAffinity + dst.Spec.Template.Spec.HostID = restored.Spec.Template.Spec.HostID return nil } diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index 8a5a11b60c..1b89ffb498 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -1435,6 +1435,8 @@ func autoConvert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *v1beta2.AW // WARNING: in.PrivateDNSName requires manual conversion: does not exist in peer-type // WARNING: in.CapacityReservationID requires manual conversion: does not exist in peer-type // WARNING: in.MarketType requires manual conversion: does not exist in peer-type + // WARNING: in.HostID requires manual conversion: does not exist in peer-type + // WARNING: in.HostAffinity requires manual conversion: does not exist in peer-type return nil } @@ -2039,6 +2041,8 @@ func autoConvert_v1beta2_Instance_To_v1beta1_Instance(in *v1beta2.Instance, out // WARNING: in.PublicIPOnLaunch requires manual conversion: does not exist in peer-type // WARNING: in.CapacityReservationID requires manual conversion: does not exist in peer-type // WARNING: in.MarketType requires manual 
conversion: does not exist in peer-type + // WARNING: in.HostID requires manual conversion: does not exist in peer-type + // WARNING: in.HostAffinity requires manual conversion: does not exist in peer-type return nil } diff --git a/api/v1beta2/awsmachine_types.go b/api/v1beta2/awsmachine_types.go index 191e46bddf..69c10d1481 100644 --- a/api/v1beta2/awsmachine_types.go +++ b/api/v1beta2/awsmachine_types.go @@ -223,6 +223,16 @@ type AWSMachineSpec struct { // If marketType is not specified and spotMarketOptions is provided, the marketType defaults to "Spot". // +optional MarketType MarketType `json:"marketType,omitempty"` + + // HostID specifies the Dedicated Host on which the instance should be launched. + // +optional + HostID *string `json:"hostId,omitempty"` + + // Affinity specifies the dedicated host affinity setting for the instance. + // When affinity is set to Host, an instance launched onto a specific host always restarts on the same host if stopped. + // +optional + // +kubebuilder:validation:Enum:=Default;Host + HostAffinity *string `json:"hostAffinity,omitempty"` } // CloudInit defines options related to the bootstrapping systems where diff --git a/api/v1beta2/types.go b/api/v1beta2/types.go index bee54a9f0b..fe513b6402 100644 --- a/api/v1beta2/types.go +++ b/api/v1beta2/types.go @@ -273,6 +273,14 @@ type Instance struct { // If marketType is not specified and spotMarketOptions is provided, the marketType defaults to "Spot". // +optional MarketType MarketType `json:"marketType,omitempty"` + + // HostID specifies the dedicated host on which the instance should be launched + // +optional + HostID *string `json:"hostID,omitempty"` + + // Affinity specifies the dedicated host affinity setting for the instance. 
+ // +optional + HostAffinity *string `json:"hostAffinity,omitempty"` } // MarketType describes the market type of an Instance diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index a3ef61f24e..297dcfdfc7 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -771,6 +771,16 @@ func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) { *out = new(string) **out = **in } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostAffinity != nil { + in, out := &in.HostAffinity, &out.HostAffinity + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineSpec. @@ -1610,6 +1620,16 @@ func (in *Instance) DeepCopyInto(out *Instance) { *out = new(string) **out = **in } + if in.HostID != nil { + in, out := &in.HostID, &out.HostID + *out = new(string) + **out = **in + } + if in.HostAffinity != nil { + in, out := &in.HostAffinity, &out.HostAffinity + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml index b9064ea810..556c2e6b32 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml @@ -1136,6 +1136,14 @@ spec: description: Specifies whether enhanced networking with ENA is enabled. type: boolean + hostAffinity: + description: Affinity specifies the dedicated host affinity setting + for the instance. 
+ type: string + hostID: + description: HostID specifies the dedicated host on which the + instance should be launched + type: string iamProfile: description: The name of the IAM instance profile associated with the instance, if applicable. @@ -3224,6 +3232,14 @@ spec: description: Specifies whether enhanced networking with ENA is enabled. type: boolean + hostAffinity: + description: Affinity specifies the dedicated host affinity setting + for the instance. + type: string + hostID: + description: HostID specifies the dedicated host on which the + instance should be launched + type: string iamProfile: description: The name of the IAM instance profile associated with the instance, if applicable. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml index 0684070332..7a5a3c4ebc 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml @@ -2103,6 +2103,14 @@ spec: description: Specifies whether enhanced networking with ENA is enabled. type: boolean + hostAffinity: + description: Affinity specifies the dedicated host affinity setting + for the instance. + type: string + hostID: + description: HostID specifies the dedicated host on which the + instance should be launched + type: string iamProfile: description: The name of the IAM instance profile associated with the instance, if applicable. 
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml index 5baacc3e2f..bbf467f1ee 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml @@ -686,6 +686,18 @@ spec: - message: allowed values are 'none' and 'amazon-pool' rule: self in ['none','amazon-pool'] type: object + hostAffinity: + description: |- + Affinity specifies the dedicated host affinity setting for the instance. + When affinity is set to Host, an instance launched onto a specific host always restarts on the same host if stopped. + enum: + - Default + - Host + type: string + hostId: + description: HostID specifies the Dedicated Host on which the instance + should be launched. + type: string iamInstanceProfile: description: IAMInstanceProfile is a name of an IAM instance profile to assign to the instance diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml index 40cf10944a..80e7bcde27 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml @@ -620,6 +620,18 @@ spec: - message: allowed values are 'none' and 'amazon-pool' rule: self in ['none','amazon-pool'] type: object + hostAffinity: + description: |- + Affinity specifies the dedicated host affinity setting for the instance. + When affinity is set to Host, an instance launched onto a specific host always restarts on the same host if stopped. + enum: + - Default + - Host + type: string + hostId: + description: HostID specifies the Dedicated Host on which + the instance should be launched. 
+ type: string iamInstanceProfile: description: IAMInstanceProfile is a name of an IAM instance profile to assign to the instance diff --git a/devbox.json b/devbox.json index 9525803140..4286f16112 100644 --- a/devbox.json +++ b/devbox.json @@ -1,7 +1,6 @@ { "$schema": "https://raw.githubusercontent.com/jetify-com/devbox/0.13.7/.schema/devbox.schema.json", "packages": [ - "go@1.22", "kind@latest", "docker@latest", "jq@latest", @@ -11,7 +10,8 @@ "tilt@latest", "awscli2@latest", "direnv@latest", - "kustomize@latest" + "kustomize@5.5.0", + "go@1.24.1" ], "shell": { "init_hook": [ @@ -27,4 +27,4 @@ ] } } -} \ No newline at end of file +} diff --git a/devbox.lock b/devbox.lock index 39efc10668..279bb77b7f 100644 --- a/devbox.lock +++ b/devbox.lock @@ -236,51 +236,51 @@ "github:NixOS/nixpkgs/nixpkgs-unstable": { "resolved": "github:NixOS/nixpkgs/3549532663732bfd89993204d40543e9edaec4f2?lastModified=1742272065&narHash=sha256-ud8vcSzJsZ%2FCK%2Br8%2Fv0lyf4yUntVmDq6Z0A41ODfWbE%3D" }, - "go@1.22": { - "last_modified": "2024-12-23T21:10:33Z", - "resolved": "github:NixOS/nixpkgs/de1864217bfa9b5845f465e771e0ecb48b30e02d#go_1_22", + "go@1.24.1": { + "last_modified": "2025-03-23T05:31:05Z", + "resolved": "github:NixOS/nixpkgs/dd613136ee91f67e5dba3f3f41ac99ae89c5406b#go", "source": "devbox-search", - "version": "1.22.10", + "version": "1.24.1", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/34qa7mwbc1ja7758q4d9sjwmgip72lj9-go-1.22.10", + "path": "/nix/store/ja4jxx60lh1qfqfl4z4p2rff56ia1c3c-go-1.24.1", "default": true } ], - "store_path": "/nix/store/34qa7mwbc1ja7758q4d9sjwmgip72lj9-go-1.22.10" + "store_path": "/nix/store/ja4jxx60lh1qfqfl4z4p2rff56ia1c3c-go-1.24.1" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/gbidq6smzj09j6qmcdklrvrjgllxmr5j-go-1.22.10", + "path": "/nix/store/6zvrmsmdg7p8yw3vii20g40b4zsh6kjr-go-1.24.1", "default": true } ], - "store_path": "/nix/store/gbidq6smzj09j6qmcdklrvrjgllxmr5j-go-1.22.10" + 
"store_path": "/nix/store/6zvrmsmdg7p8yw3vii20g40b4zsh6kjr-go-1.24.1" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/jgz3hrbqblw65v95npdnvlymlm991s0c-go-1.22.10", + "path": "/nix/store/2bcic1xcha2k11djynr488v3pg0nnghr-go-1.24.1", "default": true } ], - "store_path": "/nix/store/jgz3hrbqblw65v95npdnvlymlm991s0c-go-1.22.10" + "store_path": "/nix/store/2bcic1xcha2k11djynr488v3pg0nnghr-go-1.24.1" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/1hd6kq7rssk14py6v8mrdq2pn5ciiw6y-go-1.22.10", + "path": "/nix/store/g29rrn8qqlg4yjqv543ryrkimr7fk43h-go-1.24.1", "default": true } ], - "store_path": "/nix/store/1hd6kq7rssk14py6v8mrdq2pn5ciiw6y-go-1.22.10" + "store_path": "/nix/store/g29rrn8qqlg4yjqv543ryrkimr7fk43h-go-1.24.1" } } }, diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go index 2c1756a931..3c4d829ca1 100644 --- a/pkg/cloud/services/ec2/instances.go +++ b/pkg/cloud/services/ec2/instances.go @@ -255,6 +255,10 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use input.MarketType = scope.AWSMachine.Spec.MarketType + input.HostID = scope.AWSMachine.Spec.HostID + + input.HostAffinity = scope.AWSMachine.Spec.HostAffinity + s.scope.Debug("Running instance", "machine-role", scope.Role()) s.scope.Debug("Running instance with instance metadata options", "metadata options", input.InstanceMetadataOptions) out, err := s.runInstance(scope.Role(), input) @@ -674,6 +678,19 @@ func (s *Service) runInstance(role string, i *infrav1.Instance) (*infrav1.Instan } } + if i.HostID != nil { + if i.HostAffinity == nil { + i.HostAffinity = aws.String("Default") + } + s.scope.Debug("Running instance with dedicated host placement", "hostId", i.HostID, "affinity", i.HostAffinity) + + input.Placement = &ec2.Placement{ + Tenancy: aws.String("host"), + Affinity: i.HostAffinity, + HostId: i.HostID, + } + } + out, err := s.EC2Client.RunInstancesWithContext(context.TODO(), input) if err 
!= nil { return nil, errors.Wrap(err, "failed to run instance") diff --git a/test/e2e/shared/aws.go b/test/e2e/shared/aws.go index e5f9f9a0a4..e1ccb23128 100644 --- a/test/e2e/shared/aws.go +++ b/test/e2e/shared/aws.go @@ -2366,3 +2366,38 @@ func GetMountTargetState(e2eCtx *E2EContext, mountTargetID string) (*string, err } return result.LifeCycleState, nil } + +func getAvailabilityZone() string { + return "us-west-2a" +} + +func getInstanceType() string { + return "t3.large" +} + +func AllocateHost(e2eCtx *E2EContext) (string, error) { + ec2Svc := ec2.New(e2eCtx.AWSSession) + input := &ec2.AllocateHostsInput{ + AvailabilityZone: aws.String(getAvailabilityZone()), + InstanceType: aws.String(getInstanceType()), + Quantity: aws.Int64(1), + } + output, err := ec2Svc.AllocateHosts(input) + Expect(err).ToNot(HaveOccurred(), "Failed to allocate host") + Expect(len(output.HostIds)).To(BeNumerically(">", 0), "No dedicated host ID returned") + fmt.Println("Allocated Host ID: ", *output.HostIds[0]) + hostId := *output.HostIds[0] + return hostId, nil +} + +func ReleaseHost(e2eCtx *E2EContext, hostID string) { + ec2Svc := ec2.New(e2eCtx.AWSSession) + + input := &ec2.ReleaseHostsInput{ + HostIds: []*string{aws.String(hostID)}, + } + + _, err := ec2Svc.ReleaseHosts(input) + Expect(err).ToNot(HaveOccurred(), "Failed to release host") + fmt.Println("Released Host ID: ", hostID) +} diff --git a/test/e2e/shared/resource.go b/test/e2e/shared/resource.go index 88cb8336b0..81cf983aed 100644 --- a/test/e2e/shared/resource.go +++ b/test/e2e/shared/resource.go @@ -44,6 +44,7 @@ type TestResource struct { EC2GPU int `json:"ec2-GPU"` VolumeGP2 int `json:"volume-GP2"` EventBridgeRules int `json:"eventBridge-rules"` + //TODO: DedicatedHost int `json:"dedicated-host"` } func WriteResourceQuotesToFile(logPath string, serviceQuotas map[string]*ServiceQuota) { diff --git a/test/e2e/suites/unmanaged/unmanaged_CAPI_dedicated_host_test.go 
b/test/e2e/suites/unmanaged/unmanaged_CAPI_dedicated_host_test.go new file mode 100644 index 0000000000..c685048055 --- /dev/null +++ b/test/e2e/suites/unmanaged/unmanaged_CAPI_dedicated_host_test.go @@ -0,0 +1,217 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unmanaged + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/gofrs/flock" + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util" +) + +var _ = ginkgo.Context("[unmanaged] [functional] [ClusterClass] [dedicated-host] [WIP]", func() { + var ( + ctx context.Context + result *clusterctl.ApplyClusterTemplateAndWaitResult + requiredResources *shared.TestResource + ) + + ginkgo.BeforeEach(func() { + ctx = context.TODO() + result = &clusterctl.ApplyClusterTemplateAndWaitResult{} + }) + + ginkgo.Describe("Dedicated Hosts test [ClusterClass]", func() { + ginkgo.It("should create cluster with nested assumed role", func() { + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. 
+ specName := "functional-clusterclass-dedicated-host" + requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx) + defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) + Expect(shared.SetMultitenancyEnvVars(e2eCtx.AWSSession)).To(Succeed()) + + // Allocate a dedicated host and ensure it is released after the test + ginkgo.By("Allocating a dedicated host") + hostID, err := shared.AllocateHost(e2eCtx) + Expect(err).To(BeNil()) + Expect(hostID).NotTo(BeEmpty()) + ginkgo.By(fmt.Sprintf("Allocated dedicated host ID: %s", hostID)) + defer func() { + ginkgo.By("Releasing the dedicated host") + shared.ReleaseHost(e2eCtx, hostID) + }() + + // Pass the dedicated host ID to the cluster config (e.g., as an env var) + shared.SetEnvVar("DEDICATED_HOST_ID", hostID, false) + + ginkgo.By("Creating cluster") + clusterName := fmt.Sprintf("cluster-%s", util.RandomString(6)) + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(e2eCtx.Settings.ArtifactFolder, "clusters", e2eCtx.Environment.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath, + KubeconfigPath: e2eCtx.Environment.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: shared.NestedMultitenancyClusterClassFlavor, + Namespace: namespace.Name, + 
ClusterName: clusterName, + KubernetesVersion: e2eCtx.E2EConfig.GetVariable(shared.KubernetesVersion), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](0), + //TODO: Change ApplyClusterTemplateAndWait to use HostID + HostID: hostID, + }, + WaitForClusterIntervals: e2eCtx.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: e2eCtx.E2EConfig.GetIntervals(specName, "wait-control-plane"), + }, result) + + ginkgo.By("Checking if bastion host is running") + awsCluster, err := GetAWSClusterByName(ctx, e2eCtx.Environment.BootstrapClusterProxy, namespace.Name, clusterName) + Expect(err).To(BeNil()) + Expect(awsCluster.Status.Bastion.State).To(Equal(infrav1.InstanceStateRunning)) + expectAWSClusterConditions(awsCluster, []conditionAssertion{{infrav1.BastionHostReadyCondition, corev1.ConditionTrue, "", ""}}) + + ginkgo.By("PASSED!") + }) + }) + + ginkgo.Describe("Workload cluster with AWS SSM Parameter as the Secret Backend [ClusterClass]", func() { + ginkgo.It("should be creatable and deletable", func() { + specName := "functional-test-ssm-parameter-store-clusterclass" + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx) + defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) + + ginkgo.By("Creating a cluster") + clusterName := fmt.Sprintf("cluster-%s", util.RandomString(6)) + configCluster := defaultConfigCluster(clusterName, namespace.Name) + configCluster.ControlPlaneMachineCount = ptr.To[int64](1) + 
configCluster.WorkerMachineCount = ptr.To[int64](1) + configCluster.Flavor = shared.TopologyFlavor + _, md, _ := createCluster(ctx, configCluster, result) + + workerMachines := framework.GetMachinesByMachineDeployments(ctx, framework.GetMachinesByMachineDeploymentsInput{ + Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + ClusterName: clusterName, + Namespace: namespace.Name, + MachineDeployment: *md[0], + }) + controlPlaneMachines := framework.GetControlPlaneMachinesByCluster(ctx, framework.GetControlPlaneMachinesByClusterInput{ + Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + ClusterName: clusterName, + Namespace: namespace.Name, + }) + Expect(len(workerMachines)).To(Equal(1)) + Expect(len(controlPlaneMachines)).To(Equal(1)) + }) + }) + + // This test creates a workload cluster using an externally managed VPC and subnets. CAPA is still handling security group + // creation for the cluster. All applicable resources are restricted to us-west-2a for simplicity. + ginkgo.Describe("Workload cluster with external infrastructure [ClusterClass]", func() { + var namespace *corev1.Namespace + var requiredResources *shared.TestResource + specName := "functional-test-extinfra-cc" + mgmtClusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6)) + mgmtClusterInfra := new(shared.AWSInfrastructure) + + // Some infrastructure creation was moved to a setup node to better organize the test. 
+ ginkgo.JustBeforeEach(func() { + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 5, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx) + ginkgo.By("Creating the management cluster infrastructure") + mgmtClusterInfra.New(shared.AWSInfrastructureSpec{ + ClusterName: mgmtClusterName, + VpcCidr: "10.0.0.0/23", + PublicSubnetCidr: "10.0.0.0/24", + PrivateSubnetCidr: "10.0.1.0/24", + AvailabilityZone: "us-west-2a", + }, e2eCtx) + mgmtClusterInfra.CreateInfrastructure() + }) + + // Infrastructure cleanup is done in setup node so it is not bypassed if there is a test failure in the subject node. + ginkgo.JustAfterEach(func() { + shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) + if !e2eCtx.Settings.SkipCleanup { + ginkgo.By("Deleting the management cluster infrastructure") + mgmtClusterInfra.DeleteInfrastructure() + } + }) + + ginkgo.It("should create workload cluster in external VPC", func() { + ginkgo.By("Validating management infrastructure") + Expect(mgmtClusterInfra.VPC).NotTo(BeNil()) + Expect(*mgmtClusterInfra.State.VpcState).To(Equal("available")) + Expect(len(mgmtClusterInfra.Subnets)).To(Equal(2)) + Expect(mgmtClusterInfra.InternetGateway).NotTo(BeNil()) + Expect(mgmtClusterInfra.ElasticIP).NotTo(BeNil()) + Expect(mgmtClusterInfra.NatGateway).NotTo(BeNil()) + Expect(len(mgmtClusterInfra.RouteTables)).To(Equal(2)) + + shared.SetEnvVar("BYO_VPC_ID", *mgmtClusterInfra.VPC.VpcId, false) + shared.SetEnvVar("BYO_PUBLIC_SUBNET_ID", *mgmtClusterInfra.State.PublicSubnetID, false) + shared.SetEnvVar("BYO_PRIVATE_SUBNET_ID", 
*mgmtClusterInfra.State.PrivateSubnetID, false) + + ginkgo.By("Creating a management cluster in a peered VPC") + mgmtConfigCluster := defaultConfigCluster(mgmtClusterName, namespace.Name) + mgmtConfigCluster.WorkerMachineCount = ptr.To[int64](1) + mgmtConfigCluster.Flavor = "external-vpc-clusterclass" + mgmtCluster, mgmtMD, _ := createCluster(ctx, mgmtConfigCluster, result) + + mgmtWM := framework.GetMachinesByMachineDeployments(ctx, framework.GetMachinesByMachineDeploymentsInput{ + Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + ClusterName: mgmtClusterName, + Namespace: namespace.Name, + MachineDeployment: *mgmtMD[0], + }) + mgmtCPM := framework.GetControlPlaneMachinesByCluster(ctx, framework.GetControlPlaneMachinesByClusterInput{ + Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + ClusterName: mgmtClusterName, + Namespace: namespace.Name, + }) + Expect(len(mgmtWM)).To(Equal(1)) + Expect(len(mgmtCPM)).To(Equal(1)) + ginkgo.By("Deleting the management cluster") + deleteCluster(ctx, mgmtCluster) + }) + }) +}) diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go index d7426a5ce4..022f992462 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_clusterclass_test.go @@ -32,6 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" + "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" @@ -75,7 +76,6 @@ var _ = ginkgo.Context("[unmanaged] [functional] [ClusterClass]", func() { ClusterName: clusterName, KubernetesVersion: e2eCtx.E2EConfig.GetVariable(shared.KubernetesVersion), ControlPlaneMachineCount: ptr.To[int64](1), - WorkerMachineCount: ptr.To[int64](0), }, WaitForClusterIntervals: 
e2eCtx.E2EConfig.GetIntervals(specName, "wait-cluster"), WaitForControlPlaneIntervals: e2eCtx.E2EConfig.GetIntervals(specName, "wait-control-plane"), @@ -146,7 +146,8 @@ var _ = ginkgo.Context("[unmanaged] [functional] [ClusterClass]", func() { VpcCidr: "10.0.0.0/23", PublicSubnetCidr: "10.0.0.0/24", PrivateSubnetCidr: "10.0.1.0/24", - AvailabilityZone: "us-west-2a", + //TODO: Is this standard? + AvailabilityZone: "us-west-2a", }, e2eCtx) mgmtClusterInfra.CreateInfrastructure() })