From 22c39b5af511b6e00f8e195342f908c11969627e Mon Sep 17 00:00:00 2001 From: Weiwei Li Date: Fri, 18 Apr 2025 10:35:50 -0700 Subject: [PATCH] Enable frontend NLB --- controllers/gateway/gateway_controller.go | 3 +- controllers/ingress/group_controller.go | 85 +- controllers/service/service_controller.go | 2 +- docs/guide/ingress/annotations.md | 128 +++ pkg/annotations/constants.go | 105 +- .../elbv2/frontend_nlb_target_synthesizer.go | 166 +++ .../elbv2/frontend_nlb_targets_manager.go | 99 ++ pkg/deploy/elbv2/target_group_synthesizer.go | 2 + pkg/deploy/stack_deployer.go | 20 +- pkg/ingress/model_build_frontend_nlb.go | 768 ++++++++++++++ pkg/ingress/model_build_frontend_nlb_test.go | 966 ++++++++++++++++++ pkg/ingress/model_builder.go | 35 +- pkg/ingress/model_builder_test.go | 2 +- pkg/model/core/frontend_nlb_target_group.go | 31 + pkg/model/elbv2/target_group.go | 2 + test/e2e/ingress/utils.go | 21 +- test/e2e/ingress/vanilla_ingress_test.go | 78 ++ 17 files changed, 2434 insertions(+), 79 deletions(-) create mode 100644 pkg/deploy/elbv2/frontend_nlb_target_synthesizer.go create mode 100644 pkg/deploy/elbv2/frontend_nlb_targets_manager.go create mode 100644 pkg/ingress/model_build_frontend_nlb.go create mode 100644 pkg/ingress/model_build_frontend_nlb_test.go create mode 100644 pkg/model/core/frontend_nlb_target_group.go diff --git a/controllers/gateway/gateway_controller.go b/controllers/gateway/gateway_controller.go index 6c91111124..76b80a0db2 100644 --- a/controllers/gateway/gateway_controller.go +++ b/controllers/gateway/gateway_controller.go @@ -3,6 +3,7 @@ package gateway import ( "context" "fmt" + "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -242,7 +243,7 @@ func (r *gatewayReconciler) reconcileUpdate(ctx context.Context, gw *gwv1.Gatewa } func (r *gatewayReconciler) deployModel(ctx context.Context, gw *gwv1.Gateway, stack core.Stack) error { - if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, 
r.controllerName); err != nil { + if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, r.controllerName, nil); err != nil { var requeueNeededAfter *runtime.RequeueNeededAfter if errors.As(err, &requeueNeededAfter) { return err diff --git a/controllers/ingress/group_controller.go b/controllers/ingress/group_controller.go index 75f16ad824..3355f0f311 100644 --- a/controllers/ingress/group_controller.go +++ b/controllers/ingress/group_controller.go @@ -151,7 +151,7 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request) return errmetrics.NewErrorWithMetrics(controllerName, "add_group_finalizer_error", err, r.metricsCollector) } - _, lb, err := r.buildAndDeployModel(ctx, ingGroup) + _, lb, frontendNlb, err := r.buildAndDeployModel(ctx, ingGroup) if err != nil { return err } @@ -164,7 +164,14 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request) if statusErr != nil { return } - statusErr = r.updateIngressGroupStatus(ctx, ingGroup, lbDNS) + var frontendNlbDNS string + if frontendNlb != nil { + frontendNlbDNS, statusErr = frontendNlb.DNSName().Resolve(ctx) + if statusErr != nil { + return + } + } + statusErr = r.updateIngressGroupStatus(ctx, ingGroup, lbDNS, frontendNlbDNS) if statusErr != nil { r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedUpdateStatus, fmt.Sprintf("Failed update status due to %v", statusErr)) @@ -191,38 +198,40 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request) return nil } -func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingress.Group) (core.Stack, *elbv2model.LoadBalancer, error) { +func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingress.Group) (core.Stack, *elbv2model.LoadBalancer, *elbv2model.LoadBalancer, error) { var stack core.Stack var lb *elbv2model.LoadBalancer var secrets []types.NamespacedName var backendSGRequired bool var err error + var 
frontendNlbTargetGroupDesiredState *core.FrontendNlbTargetGroupDesiredState + var frontendNlb *elbv2model.LoadBalancer buildModelFn := func() { - stack, lb, secrets, backendSGRequired, err = r.modelBuilder.Build(ctx, ingGroup, r.metricsCollector) + stack, lb, secrets, backendSGRequired, frontendNlbTargetGroupDesiredState, frontendNlb, err = r.modelBuilder.Build(ctx, ingGroup, r.metricsCollector) } r.metricsCollector.ObserveControllerReconcileLatency(controllerName, "build_model", buildModelFn) if err != nil { r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedBuildModel, fmt.Sprintf("Failed build model due to %v", err)) - return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "build_model_error", err, r.metricsCollector) + return nil, nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "build_model_error", err, r.metricsCollector) } stackJSON, err := r.stackMarshaller.Marshal(stack) if err != nil { r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedBuildModel, fmt.Sprintf("Failed build model due to %v", err)) - return nil, nil, err + return nil, nil, nil, err } r.logger.Info("successfully built model", "model", stackJSON) deployModelFn := func() { - err = r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "ingress") + err = r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "ingress", frontendNlbTargetGroupDesiredState) } r.metricsCollector.ObserveControllerReconcileLatency(controllerName, "deploy_model", deployModelFn) if err != nil { var requeueNeededAfter *runtime.RequeueNeededAfter if errors.As(err, &requeueNeededAfter) { - return nil, nil, err + return nil, nil, nil, err } r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedDeployModel, fmt.Sprintf("Failed deploy model due to %v", err)) - return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "deploy_model_error", err, r.metricsCollector) + return nil, 
nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "deploy_model_error", err, r.metricsCollector) } r.logger.Info("successfully deployed model", "ingressGroup", ingGroup.ID) r.secretsManager.MonitorSecrets(ingGroup.ID.String(), secrets) @@ -232,9 +241,9 @@ func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingr inactiveResources = append(inactiveResources, k8s.ToSliceOfNamespacedNames(ingGroup.Members)...) } if err := r.backendSGProvider.Release(ctx, networkingpkg.ResourceTypeIngress, inactiveResources); err != nil { - return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "release_auto_generated_backend_sg_error", err, r.metricsCollector) + return nil, nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "release_auto_generated_backend_sg_error", err, r.metricsCollector) } - return stack, lb, nil + return stack, lb, frontendNlb, nil } func (r *groupReconciler) recordIngressGroupEvent(_ context.Context, ingGroup ingress.Group, eventType string, reason string, message string) { @@ -243,29 +252,41 @@ func (r *groupReconciler) recordIngressGroupEvent(_ context.Context, ingGroup in } } -func (r *groupReconciler) updateIngressGroupStatus(ctx context.Context, ingGroup ingress.Group, lbDNS string) error { +func (r *groupReconciler) updateIngressGroupStatus(ctx context.Context, ingGroup ingress.Group, lbDNS string, frontendNLBDNS string) error { for _, member := range ingGroup.Members { - if err := r.updateIngressStatus(ctx, lbDNS, member.Ing); err != nil { + if err := r.updateIngressStatus(ctx, lbDNS, frontendNLBDNS, member.Ing); err != nil { return err } } return nil } -func (r *groupReconciler) updateIngressStatus(ctx context.Context, lbDNS string, ing *networking.Ingress) error { +func (r *groupReconciler) updateIngressStatus(ctx context.Context, lbDNS string, frontendNlbDNS string, ing *networking.Ingress) error { + ingOld := ing.DeepCopy() if len(ing.Status.LoadBalancer.Ingress) != 1 || ing.Status.LoadBalancer.Ingress[0].IP 
!= "" || ing.Status.LoadBalancer.Ingress[0].Hostname != lbDNS { - ingOld := ing.DeepCopy() ing.Status.LoadBalancer.Ingress = []networking.IngressLoadBalancerIngress{ { Hostname: lbDNS, }, } + } + + // Ensure frontendNLBDNS is appended if it is not already added + if frontendNlbDNS != "" && !hasFrontendNlbHostName(ing.Status.LoadBalancer.Ingress, frontendNlbDNS) { + ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networking.IngressLoadBalancerIngress{ + Hostname: frontendNlbDNS, + }) + } + + if !isIngressStatusEqual(ingOld.Status.LoadBalancer.Ingress, ing.Status.LoadBalancer.Ingress) { if err := r.k8sClient.Status().Patch(ctx, ing, client.MergeFrom(ingOld)); err != nil { return errors.Wrapf(err, "failed to update ingress status: %v", k8s.NamespacedName(ing)) } + } + return nil } @@ -387,3 +408,37 @@ func isResourceKindAvailable(resList *metav1.APIResourceList, kind string) bool } return false } + +func isIngressStatusEqual(a, b []networking.IngressLoadBalancerIngress) bool { + if len(a) != len(b) { + return false + } + + setA := make(map[string]struct{}, len(a)) + setB := make(map[string]struct{}, len(b)) + + for _, ingress := range a { + setA[ingress.Hostname] = struct{}{} + } + + for _, ingress := range b { + setB[ingress.Hostname] = struct{}{} + } + + for key := range setA { + if _, exists := setB[key]; !exists { + return false + } + } + return true +} + +func hasFrontendNlbHostName(ingressList []networking.IngressLoadBalancerIngress, frontendNlbDNS string) bool { + for _, ingress := range ingressList { + if ingress.Hostname == frontendNlbDNS { + return true + } + + } + return false +} diff --git a/controllers/service/service_controller.go b/controllers/service/service_controller.go index 6aa7383dfd..e89bc546c5 100644 --- a/controllers/service/service_controller.go +++ b/controllers/service/service_controller.go @@ -152,7 +152,7 @@ func (r *serviceReconciler) buildModel(ctx context.Context, svc *corev1.Service) } func (r 
*serviceReconciler) deployModel(ctx context.Context, svc *corev1.Service, stack core.Stack) error { - if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "service"); err != nil { + if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "service", nil); err != nil { var requeueNeededAfter *runtime.RequeueNeededAfter if errors.As(err, &requeueNeededAfter) { return err diff --git a/docs/guide/ingress/annotations.md b/docs/guide/ingress/annotations.md index c1c6c7fcd1..af72f5a599 100644 --- a/docs/guide/ingress/annotations.md +++ b/docs/guide/ingress/annotations.md @@ -63,6 +63,19 @@ You can add annotations to kubernetes Ingress and Service objects to customize t | [alb.ingress.kubernetes.io/listener-attributes.${Protocol}-${Port}](#listener-attributes) | stringMap |N/A| Ingress |Merge| | [alb.ingress.kubernetes.io/minimum-load-balancer-capacity](#load-balancer-capacity-reservation) | stringMap |N/A| Ingress | Exclusive | | [alb.ingress.kubernetes.io/ipam-ipv4-pool-id](#ipam-ipv4-pool-id) | string |N/A| Ingress | Exclusive | +| [alb.ingress.kubernetes.io/enable-frontend-nlb](#enable-frontend-nlb) | boolean |false | Ingress | Exclusive | +| [alb.ingress.kubernetes.io/frontend-nlb-scheme](#frontend-nlb-scheme) | internal \| internet-facing |internal| Ingress | Exclusive | +| [alb.ingress.kubernetes.io/frontend-nlb-subnets](#frontend-nlb-subnets) | stringList |N/A| Ingress | Exclusive | +| [alb.ingress.kubernetes.io/frontend-nlb-security-groups](#frontend-nlb-security-groups) | stringList |N/A| Ingress | Exclusive | +| [alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping](#frontend-nlb-listener-port-mapping) | stringMap |N/A| Ingress | Merge | +| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port](#frontend-nlb-healthcheck-port) | integer \| traffic-port |traffic-port| Ingress | N/A | +| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol](#frontend-nlb-healthcheck-protocol) | HTTP \| HTTPS |HTTP| Ingress | N/A | +| 
[alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path](#frontend-nlb-healthcheck-path) | string |/| Ingress | N/A | +| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds](#frontend-nlb-healthcheck-interval-seconds) | integer |15| Ingress | N/A | +| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds](#frontend-nlb-healthcheck-timeout-seconds) | integer |5| Ingress | N/A | +| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count](#frontend-nlb-healthcheck-healthy-threshold-count) | integer |3| Ingress | N/A | +| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count](#frontend-nlb-healthcheck-unhealthy-threshold-count) | integer |3| Ingress | N/A | +| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes](#frontend-nlb-healthcheck-success-codes) | string |200| Ingress | N/A | ## IngressGroup IngressGroup feature enables you to group multiple Ingress resources together. @@ -1024,3 +1037,118 @@ Load balancer capacity unit reservation can be configured via following annotati - disable shield protection ```alb.ingress.kubernetes.io/shield-advanced-protection: 'false' ``` + + +## Enable frontend NLB +When this option is set to true, the controller will automatically provision a Network Load Balancer and register the Application Load Balancer as its target. Additional annotations are available to customize the NLB configurations, including options for scheme, security groups, subnets, and health check. The ingress resource will have two status entries, one for the NLB DNS and one for the ALB DNS. This allows users to combine the benefits of NLB and ALB into a single solution, leveraging NLB features like static IP address and PrivateLink, while retaining the rich routing capabilities of ALB. + +!!!warning + - If you need to change the ALB [scheme](#scheme), make sure to disable this feature first. 
Changing the scheme will create a new ALB, which could interfere with the current configuration. + - If you create ingress and enable the feature at once, provisioning the NLB and registering the ALB as target can take up to 3-4 mins to complete. + +- `alb.ingress.kubernetes.io/enable-frontend-nlb` enables frontend Network Load Balancer functionality. + + !!!example + - Enable frontend nlb + ``` + alb.ingress.kubernetes.io/enable-frontend-nlb: "true" + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-scheme` specifies the scheme for the Network Load Balancer. + + !!!example + - Set NLB scheme to internet-facing + ``` + alb.ingress.kubernetes.io/frontend-nlb-scheme: internet-facing + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-subnets` specifies the subnets for the Network Load Balancer. + + !!!example + - Specify subnets for NLB + ``` + alb.ingress.kubernetes.io/frontend-nlb-subnets: subnet-xxxx1,subnet-xxxx2 + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-security-groups` specifies the security groups for the Network Load Balancer. + + !!!example + - Specify security groups for NLB + ``` + alb.ingress.kubernetes.io/frontend-nlb-security-groups: sg-xxxx1,sg-xxxx2 + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping` specifies the port mapping configuration for the Network Load Balancer listeners. + + !!!note "Default" + - The port defaults to match the ALB listener port, based on whether `alb.ingress.kubernetes.io/listen-ports`(#listen-ports) is specified. + + !!!example + - Forward TCP traffic from NLB:80 to ALB:443 + ``` + alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping: 80=443 + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port` specifies the port used for health checks. + + !!!example + - Set health check port + ``` + alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port: traffic-port + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol` specifies the protocol used for health checks. 
+ + !!!example + - Set health check protocol + ``` + alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol: HTTP + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path` specifies the destination path for health checks. + + !!!example + - Set health check path + ``` + alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path: /health + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds` specifies the interval between consecutive health checks. + + !!!example + - Set health check interval + ``` + alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds: '15' + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds` specifies the target group health check timeout. + + !!!example + - Set health check timeout + ``` + alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds: '5' + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count` specifies the consecutive health check successes required before a target is considered healthy. + + !!!example + - Set healthy threshold count + ``` + alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count: '3' + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count` specifies the consecutive health check failures before a target gets marked unhealthy. + + !!!example + - Set unhealthy threshold count + ``` + alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count: '3' + ``` + +- `alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes` specifies the HTTP codes that indicate a successful health check. 
+ + !!!example + - Set success codes for health check + ``` + alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes: '200' + ``` diff --git a/pkg/annotations/constants.go b/pkg/annotations/constants.go index c9e178a2f5..870ccc39c4 100644 --- a/pkg/annotations/constants.go +++ b/pkg/annotations/constants.go @@ -13,52 +13,65 @@ const ( AnnotationPrefixIngress = "alb.ingress.kubernetes.io" // Ingress annotation suffixes - IngressSuffixLoadBalancerName = "load-balancer-name" - IngressSuffixGroupName = "group.name" - IngressSuffixGroupOrder = "group.order" - IngressSuffixTags = "tags" - IngressSuffixIPAddressType = "ip-address-type" - IngressSuffixScheme = "scheme" - IngressSuffixSubnets = "subnets" - IngressSuffixCustomerOwnedIPv4Pool = "customer-owned-ipv4-pool" - IngressSuffixLoadBalancerAttributes = "load-balancer-attributes" - IngressSuffixWAFv2ACLARN = "wafv2-acl-arn" - IngressSuffixWAFACLID = "waf-acl-id" - IngressSuffixWebACLID = "web-acl-id" // deprecated, use "waf-acl-id" instead. 
- IngressSuffixShieldAdvancedProtection = "shield-advanced-protection" - IngressSuffixSecurityGroups = "security-groups" - IngressSuffixListenPorts = "listen-ports" - IngressSuffixSSLRedirect = "ssl-redirect" - IngressSuffixInboundCIDRs = "inbound-cidrs" - IngressSuffixCertificateARN = "certificate-arn" - IngressSuffixSSLPolicy = "ssl-policy" - IngressSuffixTargetType = "target-type" - IngressSuffixBackendProtocol = "backend-protocol" - IngressSuffixBackendProtocolVersion = "backend-protocol-version" - IngressSuffixTargetGroupAttributes = "target-group-attributes" - IngressSuffixHealthCheckPort = "healthcheck-port" - IngressSuffixHealthCheckProtocol = "healthcheck-protocol" - IngressSuffixHealthCheckPath = "healthcheck-path" - IngressSuffixHealthCheckIntervalSeconds = "healthcheck-interval-seconds" - IngressSuffixHealthCheckTimeoutSeconds = "healthcheck-timeout-seconds" - IngressSuffixHealthyThresholdCount = "healthy-threshold-count" - IngressSuffixUnhealthyThresholdCount = "unhealthy-threshold-count" - IngressSuffixSuccessCodes = "success-codes" - IngressSuffixAuthType = "auth-type" - IngressSuffixAuthIDPCognito = "auth-idp-cognito" - IngressSuffixAuthIDPOIDC = "auth-idp-oidc" - IngressSuffixAuthOnUnauthenticatedRequest = "auth-on-unauthenticated-request" - IngressSuffixAuthScope = "auth-scope" - IngressSuffixAuthSessionCookie = "auth-session-cookie" - IngressSuffixAuthSessionTimeout = "auth-session-timeout" - IngressSuffixTargetNodeLabels = "target-node-labels" - IngressSuffixManageSecurityGroupRules = "manage-backend-security-group-rules" - IngressSuffixMutualAuthentication = "mutual-authentication" - IngressSuffixSecurityGroupPrefixLists = "security-group-prefix-lists" - IngressSuffixlsAttsAnnotationPrefix = "listener-attributes" - IngressLBSuffixMultiClusterTargetGroup = "multi-cluster-target-group" - IngressSuffixLoadBalancerCapacityReservation = "minimum-load-balancer-capacity" - IngressSuffixIPAMIPv4PoolId = "ipam-ipv4-pool-id" + 
IngressSuffixLoadBalancerName = "load-balancer-name" + IngressSuffixGroupName = "group.name" + IngressSuffixGroupOrder = "group.order" + IngressSuffixTags = "tags" + IngressSuffixIPAddressType = "ip-address-type" + IngressSuffixScheme = "scheme" + IngressSuffixSubnets = "subnets" + IngressSuffixCustomerOwnedIPv4Pool = "customer-owned-ipv4-pool" + IngressSuffixLoadBalancerAttributes = "load-balancer-attributes" + IngressSuffixWAFv2ACLARN = "wafv2-acl-arn" + IngressSuffixWAFACLID = "waf-acl-id" + IngressSuffixWebACLID = "web-acl-id" // deprecated, use "waf-acl-id" instead. + IngressSuffixShieldAdvancedProtection = "shield-advanced-protection" + IngressSuffixSecurityGroups = "security-groups" + IngressSuffixListenPorts = "listen-ports" + IngressSuffixSSLRedirect = "ssl-redirect" + IngressSuffixInboundCIDRs = "inbound-cidrs" + IngressSuffixCertificateARN = "certificate-arn" + IngressSuffixSSLPolicy = "ssl-policy" + IngressSuffixTargetType = "target-type" + IngressSuffixBackendProtocol = "backend-protocol" + IngressSuffixBackendProtocolVersion = "backend-protocol-version" + IngressSuffixTargetGroupAttributes = "target-group-attributes" + IngressSuffixHealthCheckPort = "healthcheck-port" + IngressSuffixHealthCheckProtocol = "healthcheck-protocol" + IngressSuffixHealthCheckPath = "healthcheck-path" + IngressSuffixHealthCheckIntervalSeconds = "healthcheck-interval-seconds" + IngressSuffixHealthCheckTimeoutSeconds = "healthcheck-timeout-seconds" + IngressSuffixHealthyThresholdCount = "healthy-threshold-count" + IngressSuffixUnhealthyThresholdCount = "unhealthy-threshold-count" + IngressSuffixSuccessCodes = "success-codes" + IngressSuffixAuthType = "auth-type" + IngressSuffixAuthIDPCognito = "auth-idp-cognito" + IngressSuffixAuthIDPOIDC = "auth-idp-oidc" + IngressSuffixAuthOnUnauthenticatedRequest = "auth-on-unauthenticated-request" + IngressSuffixAuthScope = "auth-scope" + IngressSuffixAuthSessionCookie = "auth-session-cookie" + IngressSuffixAuthSessionTimeout = 
"auth-session-timeout" + IngressSuffixTargetNodeLabels = "target-node-labels" + IngressSuffixManageSecurityGroupRules = "manage-backend-security-group-rules" + IngressSuffixMutualAuthentication = "mutual-authentication" + IngressSuffixSecurityGroupPrefixLists = "security-group-prefix-lists" + IngressSuffixlsAttsAnnotationPrefix = "listener-attributes" + IngressLBSuffixMultiClusterTargetGroup = "multi-cluster-target-group" + IngressSuffixLoadBalancerCapacityReservation = "minimum-load-balancer-capacity" + IngressSuffixIPAMIPv4PoolId = "ipam-ipv4-pool-id" + IngressSuffixEnableFrontendNlb = "enable-frontend-nlb" + IngressSuffixFrontendNlbScheme = "frontend-nlb-scheme" + IngressSuffixFrontendNlbSubnets = "frontend-nlb-subnets" + IngressSuffixFrontendNlbSecurityGroups = "frontend-nlb-security-groups" + IngressSuffixFrontendNlbListenerPortMapping = "frontend-nlb-listener-port-mapping" + IngressSuffixFrontendNlbHealthCheckPort = "frontend-nlb-healthcheck-port" + IngressSuffixFrontendNlbHealthCheckProtocol = "frontend-nlb-healthcheck-protocol" + IngressSuffixFrontendNlbHealthCheckPath = "frontend-nlb-healthcheck-path" + IngressSuffixFrontendNlbHealthCheckIntervalSeconds = "frontend-nlb-healthcheck-interval-seconds" + IngressSuffixFrontendNlbHealthCheckTimeoutSeconds = "frontend-nlb-healthcheck-timeout-seconds" + IngressSuffixFrontendNlbHealthCheckHealthyThresholdCount = "frontend-nlb-healthcheck-healthy-threshold-count" + IngressSuffixFrontendNlHealthCheckbUnhealthyThresholdCount = "frontend-nlb-healthcheck-unhealthy-threshold-count" + IngressSuffixFrontendNlbHealthCheckSuccessCodes = "frontend-nlb-healthcheck-success-codes" // NLB annotation suffixes // prefixes service.beta.kubernetes.io, service.kubernetes.io diff --git a/pkg/deploy/elbv2/frontend_nlb_target_synthesizer.go b/pkg/deploy/elbv2/frontend_nlb_target_synthesizer.go new file mode 100644 index 0000000000..a21a6897a3 --- /dev/null +++ b/pkg/deploy/elbv2/frontend_nlb_target_synthesizer.go @@ -0,0 +1,166 @@ 
+package elbv2 + +import ( + "context" + "time" + + awssdk "github.com/aws/aws-sdk-go-v2/aws" + elbv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + "github.com/go-logr/logr" + "github.com/pkg/errors" + "sigs.k8s.io/aws-load-balancer-controller/pkg/config" + "sigs.k8s.io/aws-load-balancer-controller/pkg/deploy/tracking" + "sigs.k8s.io/aws-load-balancer-controller/pkg/model/core" + elbv2model "sigs.k8s.io/aws-load-balancer-controller/pkg/model/elbv2" + "sigs.k8s.io/aws-load-balancer-controller/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func NewFrontendNlbTargetSynthesizer(k8sClient client.Client, trackingProvider tracking.Provider, taggingManager TaggingManager, frontendNlbTargetsManager FrontendNlbTargetsManager, logger logr.Logger, featureGates config.FeatureGates, stack core.Stack, frontendNlbTargetGroupDesiredState *core.FrontendNlbTargetGroupDesiredState) *frontendNlbTargetSynthesizer { + return &frontendNlbTargetSynthesizer{ + k8sClient: k8sClient, + trackingProvider: trackingProvider, + taggingManager: taggingManager, + frontendNlbTargetsManager: frontendNlbTargetsManager, + featureGates: featureGates, + logger: logger, + stack: stack, + frontendNlbTargetGroupDesiredState: frontendNlbTargetGroupDesiredState, + } +} + +type frontendNlbTargetSynthesizer struct { + k8sClient client.Client + trackingProvider tracking.Provider + taggingManager TaggingManager + frontendNlbTargetsManager FrontendNlbTargetsManager + featureGates config.FeatureGates + logger logr.Logger + stack core.Stack + frontendNlbTargetGroupDesiredState *core.FrontendNlbTargetGroupDesiredState +} + +// Synthesize processes AWS target groups and deregisters ALB targets based on the desired state. 
+func (s *frontendNlbTargetSynthesizer) Synthesize(ctx context.Context) error { + var resTGs []*elbv2model.TargetGroup + s.stack.ListResources(&resTGs) + sdkTGs, err := s.findSDKTargetGroups(ctx) + if err != nil { + return err + } + _, _, unmatchedSDKTGs, err := matchResAndSDKTargetGroups(resTGs, sdkTGs, + s.trackingProvider.ResourceIDTagKey(), s.featureGates) + if err != nil { + return err + } + + for _, sdkTG := range unmatchedSDKTGs { + if sdkTG.TargetGroup.TargetType != elbv2types.TargetTypeEnumAlb { + continue + } + + err := s.deregisterCurrentTarget(ctx, sdkTG) + if err != nil { + return errors.Wrapf(err, "failed to deregister target for the target group: %s", *sdkTG.TargetGroup.TargetGroupArn) + } + } + + return nil + +} + +func (s *frontendNlbTargetSynthesizer) deregisterCurrentTarget(ctx context.Context, sdkTG TargetGroupWithTags) error { + // Retrieve the current targets for the target group + currentTargets, err := s.frontendNlbTargetsManager.ListTargets(ctx, *sdkTG.TargetGroup.TargetGroupArn) + if err != nil { + return errors.Wrapf(err, "failed to list current target for target group: %s", *sdkTG.TargetGroup.TargetGroupArn) + } + + // If there is no target, nothing to deregister + if len(currentTargets) == 0 { + return nil + } + + // Deregister current target + s.logger.Info("Deregistering current target", + "targetGroupARN", *sdkTG.TargetGroup.TargetGroupArn, + "target", currentTargets[0].Target.Id, + "port", currentTargets[0].Target.Port, + ) + + err = s.frontendNlbTargetsManager.DeregisterTargets(ctx, *sdkTG.TargetGroup.TargetGroupArn, elbv2types.TargetDescription{ + Id: awssdk.String(*currentTargets[0].Target.Id), + Port: awssdk.Int32(*currentTargets[0].Target.Port), + }) + + if err != nil { + return errors.Wrapf(err, "failed to deregister targets for target group: %s", *sdkTG.TargetGroup.TargetGroupArn) + } + + return nil +} + +func (s *frontendNlbTargetSynthesizer) PostSynthesize(ctx context.Context) error { + var resTGs []*elbv2model.TargetGroup 
+ s.stack.ListResources(&resTGs) + + // Filter desired target group to include only ALB type target group + albResTGs := filterALBTargetGroups(resTGs) + + for _, resTG := range albResTGs { + + // Skip target group that are not yet created + if resTG.Status.TargetGroupARN == "" { + continue + } + + // List current targets + currentTargets, err := s.frontendNlbTargetsManager.ListTargets(ctx, resTG.Status.TargetGroupARN) + + if err != nil { + return err + } + + desiredTarget, err := s.frontendNlbTargetGroupDesiredState.TargetGroups[resTG.Spec.Name].TargetARN.Resolve(ctx) + desiredTargetPort := s.frontendNlbTargetGroupDesiredState.TargetGroups[resTG.Spec.Name].TargetPort + + if err != nil { + return errors.Wrapf(err, "failed to resolve the desiredTarget for target group: %s", desiredTarget) + } + + if len(currentTargets) == 0 || + currentTargets[0].Target == nil || + currentTargets[0].Target.Id == nil || + *currentTargets[0].Target.Id != desiredTarget { + err = s.frontendNlbTargetsManager.RegisterTargets(ctx, resTG.Status.TargetGroupARN, elbv2types.TargetDescription{ + Id: awssdk.String(desiredTarget), + Port: awssdk.Int32(desiredTargetPort), + }) + + if err != nil { + requeueMsg := "Failed to register target, retrying after deplay for target group: " + resTG.Status.TargetGroupARN + return runtime.NewRequeueNeededAfter(requeueMsg, 15*time.Second) + } + + } + + } + + return nil +} + +func filterALBTargetGroups(targetGroups []*elbv2model.TargetGroup) []*elbv2model.TargetGroup { + var filteredTargetGroups []*elbv2model.TargetGroup + for _, tg := range targetGroups { + if elbv2types.TargetTypeEnum(tg.Spec.TargetType) == elbv2types.TargetTypeEnumAlb { + filteredTargetGroups = append(filteredTargetGroups, tg) + } + } + return filteredTargetGroups +} + +func (s *frontendNlbTargetSynthesizer) findSDKTargetGroups(ctx context.Context) ([]TargetGroupWithTags, error) { + stackTags := s.trackingProvider.StackTags(s.stack) + return s.taggingManager.ListTargetGroups(ctx, + 
tracking.TagsAsTagFilter(stackTags)) +} diff --git a/pkg/deploy/elbv2/frontend_nlb_targets_manager.go b/pkg/deploy/elbv2/frontend_nlb_targets_manager.go new file mode 100644 index 0000000000..acb8c33469 --- /dev/null +++ b/pkg/deploy/elbv2/frontend_nlb_targets_manager.go @@ -0,0 +1,99 @@ +package elbv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + awssdk "github.com/aws/aws-sdk-go-v2/aws" + elbv2sdk "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + elbv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + "github.com/go-logr/logr" + "github.com/pkg/errors" + "sigs.k8s.io/aws-load-balancer-controller/pkg/aws/services" +) + +// FrontendNlbTargetsManager is an abstraction around ELBV2's targets API. +type FrontendNlbTargetsManager interface { + // Register Targets into TargetGroup. + RegisterTargets(ctx context.Context, tgArn string, albTarget elbv2types.TargetDescription) error + + // Deregister Targets from TargetGroup. + DeregisterTargets(ctx context.Context, tgArn string, albTarget elbv2types.TargetDescription) error + + // List Targets from TargetGroup. 
+ ListTargets(ctx context.Context, tgArn string) ([]elbv2types.TargetHealthDescription, error) +} + +// NewFrontendNlbTargetsManager constructs new frontendNlbTargetsManager +func NewFrontendNlbTargetsManager(elbv2Client services.ELBV2, logger logr.Logger) *frontendNlbTargetsManager { + return &frontendNlbTargetsManager{ + elbv2Client: elbv2Client, + logger: logger, + } +} + +var _ FrontendNlbTargetsManager = &frontendNlbTargetsManager{} + +type frontendNlbTargetsManager struct { + elbv2Client services.ELBV2 + logger logr.Logger +} + +func (m *frontendNlbTargetsManager) RegisterTargets(ctx context.Context, tgARN string, albTarget elbv2types.TargetDescription) error { + targets := []elbv2types.TargetDescription{albTarget} + req := &elbv2sdk.RegisterTargetsInput{ + TargetGroupArn: aws.String(tgARN), + Targets: targets, + } + m.logger.Info("registering targets", + "arn", tgARN, + "targets", albTarget) + + _, err := m.elbv2Client.RegisterTargetsWithContext(ctx, req) + + if err != nil { + return errors.Wrap(err, "failed to register targets") + } + + m.logger.Info("registered targets", + "arn", tgARN) + return nil +} + +func (m *frontendNlbTargetsManager) DeregisterTargets(ctx context.Context, tgARN string, albTarget elbv2types.TargetDescription) error { + targets := []elbv2types.TargetDescription{albTarget} + m.logger.Info("deRegistering targets", + "arn", tgARN, + "targets", targets) + req := &elbv2sdk.DeregisterTargetsInput{ + TargetGroupArn: aws.String(tgARN), + Targets: targets, + } + _, err := m.elbv2Client.DeregisterTargetsWithContext(ctx, req) + if err != nil { + return errors.Wrap(err, "failed to deregister targets") + } + m.logger.Info("deregistered targets", + "arn", tgARN) + + return nil +} + +func (m *frontendNlbTargetsManager) ListTargets(ctx context.Context, tgARN string) ([]elbv2types.TargetHealthDescription, error) { + m.logger.Info("Listing targets", + "arn", tgARN) + + resp, err := m.elbv2Client.DescribeTargetHealthWithContext(ctx, 
&elbv2sdk.DescribeTargetHealthInput{ + TargetGroupArn: awssdk.String(tgARN), + }) + + if err != nil { + return make([]elbv2types.TargetHealthDescription, 0), err + } + + if len(resp.TargetHealthDescriptions) != 0 { + return resp.TargetHealthDescriptions, nil + } + + return make([]elbv2types.TargetHealthDescription, 0), nil +} diff --git a/pkg/deploy/elbv2/target_group_synthesizer.go b/pkg/deploy/elbv2/target_group_synthesizer.go index 690dae5a29..11d7e54221 100644 --- a/pkg/deploy/elbv2/target_group_synthesizer.go +++ b/pkg/deploy/elbv2/target_group_synthesizer.go @@ -2,6 +2,7 @@ package elbv2 import ( "context" + awssdk "github.com/aws/aws-sdk-go-v2/aws" "github.com/go-logr/logr" "github.com/pkg/errors" @@ -166,6 +167,7 @@ func isSDKTargetGroupRequiresReplacement(sdkTG TargetGroupWithTags, resTG *elbv2 if string(resTG.Spec.TargetType) != string(sdkTG.TargetGroup.TargetType) { return true } + if string(resTG.Spec.Protocol) != string(sdkTG.TargetGroup.Protocol) { return true } diff --git a/pkg/deploy/stack_deployer.go b/pkg/deploy/stack_deployer.go index 7ca4aa3af8..d0ddc214bb 100644 --- a/pkg/deploy/stack_deployer.go +++ b/pkg/deploy/stack_deployer.go @@ -20,10 +20,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +const ( + ingressController = "ingress" +) + // StackDeployer will deploy a resource stack into AWS and K8S. type StackDeployer interface { // Deploy a resource stack. - Deploy(ctx context.Context, stack core.Stack, metricsCollector lbcmetrics.MetricCollector, controllerName string) error + Deploy(ctx context.Context, stack core.Stack, metricsCollector lbcmetrics.MetricCollector, controllerName string, frontendNlbTargetGroupDesiredState *core.FrontendNlbTargetGroupDesiredState) error } // NewDefaultStackDeployer constructs new defaultStackDeployer. 
@@ -49,6 +53,7 @@ func NewDefaultStackDeployer(cloud services.Cloud, k8sClient client.Client, elbv2LRManager: elbv2.NewDefaultListenerRuleManager(cloud.ELBV2(), trackingProvider, elbv2TaggingManager, config.ExternalManagedTags, config.FeatureGates, logger), elbv2TGManager: elbv2.NewDefaultTargetGroupManager(cloud.ELBV2(), trackingProvider, elbv2TaggingManager, cloud.VpcID(), config.ExternalManagedTags, logger), elbv2TGBManager: elbv2.NewDefaultTargetGroupBindingManager(k8sClient, trackingProvider, logger), + elbv2FrontendNlbTargetsManager: elbv2.NewFrontendNlbTargetsManager(cloud.ELBV2(), logger), wafv2WebACLAssociationManager: wafv2.NewDefaultWebACLAssociationManager(cloud.WAFv2(), logger), wafRegionalWebACLAssociationManager: wafregional.NewDefaultWebACLAssociationManager(cloud.WAFRegional(), logger), shieldProtectionManager: shield.NewDefaultProtectionManager(cloud.Shield(), logger), @@ -77,6 +82,7 @@ type defaultStackDeployer struct { elbv2LRManager elbv2.ListenerRuleManager elbv2TGManager elbv2.TargetGroupManager elbv2TGBManager elbv2.TargetGroupBindingManager + elbv2FrontendNlbTargetsManager elbv2.FrontendNlbTargetsManager wafv2WebACLAssociationManager wafv2.WebACLAssociationManager wafRegionalWebACLAssociationManager wafregional.WebACLAssociationManager shieldProtectionManager shield.ProtectionManager @@ -94,15 +100,21 @@ type ResourceSynthesizer interface { } // Deploy a resource stack. 
-func (d *defaultStackDeployer) Deploy(ctx context.Context, stack core.Stack, metricsCollector lbcmetrics.MetricCollector, controllerName string) error { +func (d *defaultStackDeployer) Deploy(ctx context.Context, stack core.Stack, metricsCollector lbcmetrics.MetricCollector, controllerName string, frontendNlbTargetGroupDesiredState *core.FrontendNlbTargetGroupDesiredState) error { synthesizers := []ResourceSynthesizer{ ec2.NewSecurityGroupSynthesizer(d.cloud.EC2(), d.trackingProvider, d.ec2TaggingManager, d.ec2SGManager, d.vpcID, d.logger, stack), + } + + if controllerName == ingressController { + synthesizers = append(synthesizers, elbv2.NewFrontendNlbTargetSynthesizer(d.k8sClient, d.trackingProvider, d.elbv2TaggingManager, d.elbv2FrontendNlbTargetsManager, d.logger, d.featureGates, stack, frontendNlbTargetGroupDesiredState)) + } + + synthesizers = append(synthesizers, elbv2.NewTargetGroupSynthesizer(d.cloud.ELBV2(), d.trackingProvider, d.elbv2TaggingManager, d.elbv2TGManager, d.logger, d.featureGates, stack), elbv2.NewLoadBalancerSynthesizer(d.cloud.ELBV2(), d.trackingProvider, d.elbv2TaggingManager, d.elbv2LBManager, d.logger, d.featureGates, d.controllerConfig, stack), elbv2.NewListenerSynthesizer(d.cloud.ELBV2(), d.elbv2TaggingManager, d.elbv2LSManager, d.logger, stack), elbv2.NewListenerRuleSynthesizer(d.cloud.ELBV2(), d.elbv2TaggingManager, d.elbv2LRManager, d.logger, d.featureGates, stack), - elbv2.NewTargetGroupBindingSynthesizer(d.k8sClient, d.trackingProvider, d.elbv2TGBManager, d.logger, stack), - } + elbv2.NewTargetGroupBindingSynthesizer(d.k8sClient, d.trackingProvider, d.elbv2TGBManager, d.logger, stack)) if d.addonsConfig.WAFV2Enabled { synthesizers = append(synthesizers, wafv2.NewWebACLAssociationSynthesizer(d.wafv2WebACLAssociationManager, d.logger, stack)) diff --git a/pkg/ingress/model_build_frontend_nlb.go b/pkg/ingress/model_build_frontend_nlb.go new file mode 100644 index 0000000000..58c4824835 --- /dev/null +++ 
b/pkg/ingress/model_build_frontend_nlb.go @@ -0,0 +1,768 @@ +package ingress + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "strconv" + + awssdk "github.com/aws/aws-sdk-go-v2/aws" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/aws-load-balancer-controller/pkg/annotations" + "sigs.k8s.io/aws-load-balancer-controller/pkg/equality" + "sigs.k8s.io/aws-load-balancer-controller/pkg/k8s" + "sigs.k8s.io/aws-load-balancer-controller/pkg/model/core" + elbv2model "sigs.k8s.io/aws-load-balancer-controller/pkg/model/elbv2" + "sigs.k8s.io/aws-load-balancer-controller/pkg/networking" +) + +// FrontendNlbListenerConfig defines the configuration for an NLB listener +type FrontendNlbListenerConfig struct { + Protocol elbv2model.Protocol + Port int32 + TargetPort int32 + HealthCheckConfig elbv2model.TargetGroupHealthCheckConfig + HealthCheckConfigExplicit map[string]bool +} + +// FrontendNlbListenConfigWithIngress associates a listener config with its ingress resource +type FrontendNlbListenConfigWithIngress struct { + ingKey types.NamespacedName + FrontendNlbListenerConfig FrontendNlbListenerConfig +} + +// buildFrontendNlbModel constructs the frontend NLB model for the ingress +// It creates the load balancer, listeners, and target groups based on ingress configurations +func (t *defaultModelBuildTask) buildFrontendNlbModel(ctx context.Context, alb *elbv2model.LoadBalancer, listenerPortConfigByIngress map[types.NamespacedName]map[int32]listenPortConfig) error { + enableFrontendNlb, err := t.buildEnableFrontendNlbViaAnnotation(ctx) + if err != nil { + return err + } + + // If the annotation is not present or explicitly set to false, do not build the NLB model + if !enableFrontendNlb { + return nil + } + + scheme, err := t.buildFrontendNlbScheme(ctx, alb) + if err != 
nil { + return err + } + err = t.buildFrontendNlb(ctx, scheme, alb) + if err != nil { + return err + } + err = t.buildFrontendNlbListeners(ctx, listenerPortConfigByIngress) + if err != nil { + return err + } + return nil +} + +func (t *defaultModelBuildTask) buildEnableFrontendNlbViaAnnotation(ctx context.Context) (bool, error) { + var enableFrontendNlb *bool + for _, member := range t.ingGroup.Members { + rawEnableFrontendNlb := false + exists, err := t.annotationParser.ParseBoolAnnotation(annotations.IngressSuffixEnableFrontendNlb, &rawEnableFrontendNlb, member.Ing.Annotations) + if err != nil { + return false, err + } + + if exists { + if enableFrontendNlb == nil { + enableFrontendNlb = &rawEnableFrontendNlb + } else if *enableFrontendNlb != rawEnableFrontendNlb { + return false, errors.New("conflicting enable frontend NLB values") + } + + } + } + + if enableFrontendNlb == nil { + return false, nil + } + + return *enableFrontendNlb, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbScheme(ctx context.Context, alb *elbv2model.LoadBalancer) (elbv2model.LoadBalancerScheme, error) { + scheme, explicitSchemeSpecified, err := t.buildFrontendNlbSchemeViaAnnotation(ctx, alb) + if err != nil { + return alb.Spec.Scheme, err + } + if explicitSchemeSpecified { + return scheme, nil + } + + return t.defaultScheme, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbSubnetMappings(ctx context.Context, scheme elbv2model.LoadBalancerScheme) ([]elbv2model.SubnetMapping, error) { + var explicitSubnetNameOrIDsList [][]string + for _, member := range t.ingGroup.Members { + var rawSubnetNameOrIDs []string + if exists := t.annotationParser.ParseStringSliceAnnotation(annotations.IngressSuffixFrontendNlbSubnets, &rawSubnetNameOrIDs, member.Ing.Annotations); !exists { + continue + } + explicitSubnetNameOrIDsList = append(explicitSubnetNameOrIDsList, rawSubnetNameOrIDs) + } + + if len(explicitSubnetNameOrIDsList) != 0 { + chosenSubnetNameOrIDs := 
explicitSubnetNameOrIDsList[0] + for _, subnetNameOrIDs := range explicitSubnetNameOrIDsList[1:] { + if !cmp.Equal(chosenSubnetNameOrIDs, subnetNameOrIDs, equality.IgnoreStringSliceOrder()) { + return nil, errors.Errorf("conflicting subnets: %v | %v", chosenSubnetNameOrIDs, subnetNameOrIDs) + } + } + chosenSubnets, err := t.subnetsResolver.ResolveViaNameOrIDSlice(ctx, chosenSubnetNameOrIDs, + networking.WithSubnetsResolveLBType(elbv2model.LoadBalancerTypeNetwork), + networking.WithSubnetsResolveLBScheme(scheme), + ) + if err != nil { + return nil, err + } + + return buildFrontendNlbSubnetMappingsWithSubnets(chosenSubnets), nil + } + + return nil, nil + +} + +func buildFrontendNlbSubnetMappingsWithSubnets(subnets []ec2types.Subnet) []elbv2model.SubnetMapping { + subnetMappings := make([]elbv2model.SubnetMapping, 0, len(subnets)) + for _, subnet := range subnets { + subnetMappings = append(subnetMappings, elbv2model.SubnetMapping{ + SubnetID: awssdk.ToString(subnet.SubnetId), + }) + } + return subnetMappings +} + +func (t *defaultModelBuildTask) buildFrontendNlb(ctx context.Context, scheme elbv2model.LoadBalancerScheme, alb *elbv2model.LoadBalancer) error { + spec, err := t.buildFrontendNlbSpec(ctx, scheme, alb) + if err != nil { + return err + } + t.frontendNlb = elbv2model.NewLoadBalancer(t.stack, "FrontendNlb", spec) + + return nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbSpec(ctx context.Context, scheme elbv2model.LoadBalancerScheme, + alb *elbv2model.LoadBalancer) (elbv2model.LoadBalancerSpec, error) { + securityGroups, err := t.buildFrontendNlbSecurityGroups(ctx) + if err != nil { + return elbv2model.LoadBalancerSpec{}, err + } + + // use alb security group if it is not explicitly specified + if securityGroups == nil { + securityGroups = alb.Spec.SecurityGroups + } + + subnetMappings, err := t.buildFrontendNlbSubnetMappings(ctx, scheme) + if err != nil { + return elbv2model.LoadBalancerSpec{}, err + } + + // use alb subnetMappings if it is not 
explicitly specified + if subnetMappings == nil { + subnetMappings = alb.Spec.SubnetMappings + } + + name, err := t.buildFrontendNlbName(ctx, scheme, alb) + if err != nil { + return elbv2model.LoadBalancerSpec{}, err + } + + spec := elbv2model.LoadBalancerSpec{ + Name: name, + Type: elbv2model.LoadBalancerTypeNetwork, + Scheme: scheme, + IPAddressType: alb.Spec.IPAddressType, + SecurityGroups: securityGroups, + SubnetMappings: subnetMappings, + } + + return spec, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbName(_ context.Context, scheme elbv2model.LoadBalancerScheme, alb *elbv2model.LoadBalancer) (string, error) { + // build NLB name based upon ALB name + // keeping as much of the original name as possible while ensuring the "-nlb" suffix is always present and the total length never exceeds 32 characters. + if alb.Spec.Name != "" { + baseName := alb.Spec.Name + maxBaseLength := 28 + if len(baseName) > maxBaseLength { + baseName = baseName[:maxBaseLength] + } + nlbName := baseName + "-nlb" + + return nlbName, nil + } + + // Should not fall to this case, but keep it just in case + uuidHash := sha256.New() + _, _ = uuidHash.Write([]byte(t.clusterName)) + _, _ = uuidHash.Write([]byte(t.ingGroup.ID.String())) + _, _ = uuidHash.Write([]byte(scheme)) + uuid := hex.EncodeToString(uuidHash.Sum(nil)) + + if t.ingGroup.ID.IsExplicit() { + payload := invalidLoadBalancerNamePattern.ReplaceAllString(t.ingGroup.ID.Name, "") + return fmt.Sprintf("k8s-%.16s-%.8s-nlb", payload, uuid), nil + } + + sanitizedNamespace := invalidLoadBalancerNamePattern.ReplaceAllString(t.ingGroup.ID.Namespace, "") + sanitizedName := invalidLoadBalancerNamePattern.ReplaceAllString(t.ingGroup.ID.Name, "") + return fmt.Sprintf("k8s-%.8s-%.8s-%.6s-nlb", sanitizedNamespace, sanitizedName, uuid), nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbSecurityGroups(ctx context.Context) ([]core.StringToken, error) { + sgNameOrIDsViaAnnotation, err := 
t.buildNLBFrontendSGNameOrIDsFromAnnotation(ctx) + if err != nil { + return nil, err + } + + var lbSGTokens []core.StringToken + if len(sgNameOrIDsViaAnnotation) != 0 { + frontendSGIDs, err := t.sgResolver.ResolveViaNameOrID(ctx, sgNameOrIDsViaAnnotation) + + if err != nil { + return nil, err + } + for _, sgID := range frontendSGIDs { + lbSGTokens = append(lbSGTokens, core.LiteralStringToken(sgID)) + } + return lbSGTokens, nil + } + return nil, nil +} + +func (t *defaultModelBuildTask) buildNLBFrontendSGNameOrIDsFromAnnotation(ctx context.Context) ([]string, error) { + var explicitSGNameOrIDsList [][]string + for _, member := range t.ingGroup.Members { + var rawSGNameOrIDs []string + if exists := t.annotationParser.ParseStringSliceAnnotation(annotations.IngressSuffixFrontendNlbSecurityGroups, &rawSGNameOrIDs, member.Ing.Annotations); !exists { + continue + } + explicitSGNameOrIDsList = append(explicitSGNameOrIDsList, rawSGNameOrIDs) + } + if len(explicitSGNameOrIDsList) == 0 { + return nil, nil + } + chosenSGNameOrIDs := explicitSGNameOrIDsList[0] + for _, sgNameOrIDs := range explicitSGNameOrIDsList[1:] { + if !cmp.Equal(chosenSGNameOrIDs, sgNameOrIDs) { + return nil, errors.Errorf("conflicting securityGroups: %v | %v", chosenSGNameOrIDs, sgNameOrIDs) + } + } + return chosenSGNameOrIDs, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbListeners(ctx context.Context, listenerPortConfigByIngress map[types.NamespacedName]map[int32]listenPortConfig) error { + frontendNlbListenerConfigsByPort := make(map[int32][]FrontendNlbListenConfigWithIngress) + + // build frontend nlb config by port for ingress + for _, member := range t.ingGroup.Members { + ingKey := k8s.NamespacedName(member.Ing) + frontendNlbListenerConfigByPortForIngress, err := t.buildFrontendNlbListenerConfigByPortForIngress(ctx, &member, listenerPortConfigByIngress) + if err != nil { + return errors.Wrapf(err, "failed to compute listenPort config for ingress: %s", ingKey.String()) + } + for port, 
config := range frontendNlbListenerConfigByPortForIngress { + configWithIngress := FrontendNlbListenConfigWithIngress{ + ingKey: ingKey, + FrontendNlbListenerConfig: config, + } + frontendNlbListenerConfigsByPort[port] = append( + frontendNlbListenerConfigsByPort[port], + configWithIngress, + ) + } + } + + // merge frontend nlb listener configs + frontendNlbListenerConfigByPort := make(map[int32]FrontendNlbListenerConfig) + for port, cfgs := range frontendNlbListenerConfigsByPort { + mergedCfg, err := t.mergeFrontendNlbListenPortConfigs(ctx, cfgs) + if err != nil { + return errors.Wrapf(err, "failed to merge NLB listenPort config for port: %v", port) + } + frontendNlbListenerConfigByPort[port] = mergedCfg + } + + // build listener using the config + for port, cfg := range frontendNlbListenerConfigByPort { + _, err := t.buildFrontendNlbListener(ctx, port, cfg) + if err != nil { + return err + } + } + + return nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbListenerConfigByPortForIngress(ctx context.Context, ing *ClassifiedIngress, listenerPortConfigByIngress map[types.NamespacedName]map[int32]listenPortConfig) (map[int32]FrontendNlbListenerConfig, error) { + ingKey := k8s.NamespacedName(ing.Ing) + + frontendNlbListenerConfigByPort := make(map[int32]FrontendNlbListenerConfig) + + portMapping, err := t.parseFrontendNlbListenerPortMapping(ctx, ing.Ing.Annotations) + if err != nil { + return nil, err + } + + // Check if frontend-nlb-listener-port-mapping exists + if len(portMapping) > 0 { + //if exists: only create NLB listeners for explicitly mapped ALB listener ports + for nlbListenerPort, mappedAlbListenerPort := range portMapping { + + // check if the ALB listener port exists in the listener port set + if _, exists := listenerPortConfigByIngress[ingKey][mappedAlbListenerPort]; !exists { + t.logger.Info("Skipping NLB listener creation for unmapped ALB listener port", "mappedAlbListenerPort", mappedAlbListenerPort) + continue + } + + healthCheckConfig, 
isExplicit, err := t.buildFrontendNlbTargetGroupHealthCheckConfig(ctx, ing.Ing.Annotations, "TCP") + if err != nil { + return nil, err + } + + frontendNlbListenerConfigByPort[nlbListenerPort] = FrontendNlbListenerConfig{ + Protocol: elbv2model.ProtocolTCP, + Port: nlbListenerPort, + TargetPort: mappedAlbListenerPort, + HealthCheckConfig: healthCheckConfig, + HealthCheckConfigExplicit: isExplicit, + } + } + + } else { + // if not: Map ALB listener ports directly to NLB listener ports + for albListenerPort := range listenerPortConfigByIngress[ingKey] { + + healthCheckConfig, isExplicit, err := t.buildFrontendNlbTargetGroupHealthCheckConfig(ctx, ing.Ing.Annotations, "TCP") + if err != nil { + return nil, err + } + + // Add the listener configuration to the map + frontendNlbListenerConfigByPort[albListenerPort] = FrontendNlbListenerConfig{ + Protocol: elbv2model.ProtocolTCP, + Port: albListenerPort, + TargetPort: albListenerPort, + HealthCheckConfig: healthCheckConfig, + HealthCheckConfigExplicit: isExplicit, + } + } + } + + return frontendNlbListenerConfigByPort, nil +} + +func (t *defaultModelBuildTask) parseFrontendNlbListenerPortMapping(ctx context.Context, ingAnnotation map[string]string) (map[int32]int32, error) { + var rawPortMapping map[string]string + _, err := t.annotationParser.ParseStringMapAnnotation(annotations.IngressSuffixFrontendNlbListenerPortMapping, &rawPortMapping, ingAnnotation) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse frontend-nlb-listener-port-mapping for ingress %v", rawPortMapping) + } + + portMappping := make(map[int32]int32) + + for rawNlbPort, rawAlbPort := range rawPortMapping { + nlbPort, err := strconv.ParseInt(rawNlbPort, 10, 32) + if err != nil { + return nil, errors.Errorf("invalid NLB listener port: %s", rawNlbPort) + } + + albPort, err := strconv.ParseInt(rawAlbPort, 10, 32) + if err != nil { + return nil, errors.Errorf("invalid ALB listener port: %s", rawAlbPort) + } + + portMappping[int32(nlbPort)] = 
int32(albPort) + } + + return portMappping, nil + +} + +func (t *defaultModelBuildTask) mergeFrontendNlbListenPortConfigs(ctx context.Context, configs []FrontendNlbListenConfigWithIngress) (FrontendNlbListenerConfig, error) { + if len(configs) == 0 { + return FrontendNlbListenerConfig{}, errors.New("no NLB listener port configurations provided") + } + + // Initialize the final configuration + finalConfig := FrontendNlbListenerConfig{} + explicitFields := make(map[string]bool) + + // Port and Protocol are the same + finalConfig.Port = configs[0].FrontendNlbListenerConfig.Port + finalConfig.Protocol = configs[0].FrontendNlbListenerConfig.Protocol + + // Initialize the first Target port + finalConfig.TargetPort = configs[0].FrontendNlbListenerConfig.TargetPort + + // Iterate over all configurations to build the final configuration + for i, config := range configs { + healthCheckConfig := config.FrontendNlbListenerConfig.HealthCheckConfig + explicit := config.FrontendNlbListenerConfig.HealthCheckConfigExplicit + + // Merge intervalSeconds + err := mergeHealthCheckField("IntervalSeconds", &finalConfig.HealthCheckConfig.IntervalSeconds, healthCheckConfig.IntervalSeconds, explicit, explicitFields, i) + if err != nil { + return FrontendNlbListenerConfig{}, err + } + + // Merge timeoutSeconds + err = mergeHealthCheckField("TimeoutSeconds", &finalConfig.HealthCheckConfig.TimeoutSeconds, healthCheckConfig.TimeoutSeconds, explicit, explicitFields, i) + if err != nil { + return FrontendNlbListenerConfig{}, err + } + + // Merge healthyThresholdCount + err = mergeHealthCheckField("HealthyThresholdCount", &finalConfig.HealthCheckConfig.HealthyThresholdCount, healthCheckConfig.HealthyThresholdCount, explicit, explicitFields, i) + if err != nil { + return FrontendNlbListenerConfig{}, err + } + + // Merge unhealthyThresholdCount + err = mergeHealthCheckField("UnhealthyThresholdCount", &finalConfig.HealthCheckConfig.UnhealthyThresholdCount, healthCheckConfig.UnhealthyThresholdCount, 
explicit, explicitFields, i) + if err != nil { + return FrontendNlbListenerConfig{}, err + } + + // Merge protocol + if explicit["Protocol"] { + if explicitFields["Protocol"] { + if finalConfig.HealthCheckConfig.Protocol != healthCheckConfig.Protocol { + return FrontendNlbListenerConfig{}, errors.Errorf("conflicting Protocol, config %d: %s, previous: %s", + i+1, healthCheckConfig.Protocol, finalConfig.HealthCheckConfig.Protocol) + } + } else { + finalConfig.HealthCheckConfig.Protocol = healthCheckConfig.Protocol + explicitFields["Protocol"] = true + } + } else if !explicitFields["Protocol"] { + finalConfig.HealthCheckConfig.Protocol = healthCheckConfig.Protocol + } + + // Merge path + err = mergeHealthCheckField("Path", &finalConfig.HealthCheckConfig.Path, healthCheckConfig.Path, explicit, explicitFields, i) + if err != nil { + return FrontendNlbListenerConfig{}, err + } + + // Merge matcher + err = mergeHealthCheckField("Matcher", &finalConfig.HealthCheckConfig.Matcher, healthCheckConfig.Matcher, explicit, explicitFields, i) + if err != nil { + return FrontendNlbListenerConfig{}, err + } + + // Merge port + err = mergeHealthCheckField("Port", &finalConfig.HealthCheckConfig.Port, healthCheckConfig.Port, explicit, explicitFields, i) + if err != nil { + return FrontendNlbListenerConfig{}, err + } + + // Validate NLB-to-ALB port mappings to ensure each NLB listener port maps to exactly one ALB port, preventing connection collisions + if finalConfig.TargetPort != config.FrontendNlbListenerConfig.TargetPort { + return FrontendNlbListenerConfig{}, errors.Errorf("conflicting Target Port, config %d: %v, previous: %v", + i+1, config.FrontendNlbListenerConfig.TargetPort, finalConfig.TargetPort) + } else { + finalConfig.TargetPort = config.FrontendNlbListenerConfig.TargetPort + } + } + + return finalConfig, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbListener(ctx context.Context, port int32, config FrontendNlbListenerConfig) (*elbv2model.Listener, error) { + 
lsSpec, err := t.buildFrontendNlbListenerSpec(ctx, port, config) + if err != nil { + return nil, err + } + frontendNlbListenerResID := buildFrontendNlbResourceID("ls", config.Protocol, &port) + ls := elbv2model.NewListener(t.stack, frontendNlbListenerResID, lsSpec) + return ls, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbListenerSpec(ctx context.Context, port int32, config FrontendNlbListenerConfig) (elbv2model.ListenerSpec, error) { + listenerProtocol := elbv2model.Protocol(config.Protocol) + + targetGroup, err := t.buildFrontendNlbTargetGroup(ctx, port, config) + if err != nil { + return elbv2model.ListenerSpec{}, err + } + + defaultActions := t.buildFrontendNlbListenerDefaultActions(ctx, targetGroup) + + t.frontendNlbTargetGroupDesiredState.AddTargetGroup(targetGroup.Spec.Name, targetGroup.TargetGroupARN(), t.loadBalancer.LoadBalancerARN(), *targetGroup.Spec.Port, config.TargetPort) + + return elbv2model.ListenerSpec{ + LoadBalancerARN: t.frontendNlb.LoadBalancerARN(), + Port: port, + Protocol: listenerProtocol, + DefaultActions: defaultActions, + }, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbListenerDefaultActions(_ context.Context, targetGroup *elbv2model.TargetGroup) []elbv2model.Action { + return []elbv2model.Action{ + { + Type: elbv2model.ActionTypeForward, + ForwardConfig: &elbv2model.ForwardActionConfig{ + TargetGroups: []elbv2model.TargetGroupTuple{ + { + TargetGroupARN: targetGroup.TargetGroupARN(), + }, + }, + }, + }, + } +} + +func (t *defaultModelBuildTask) buildFrontendNlbTargetGroup(ctx context.Context, port int32, config FrontendNlbListenerConfig) (*elbv2model.TargetGroup, error) { + frontendNlbTgResID := buildFrontendNlbResourceID("tg", config.Protocol, &port) + tgSpec, err := t.buildFrontendNlbTargetGroupSpec(ctx, config.Protocol, port, &config.HealthCheckConfig) + if err != nil { + return nil, err + } + targetGroup := elbv2model.NewTargetGroup(t.stack, frontendNlbTgResID, tgSpec) + return targetGroup, nil +} + +func 
(t *defaultModelBuildTask) buildFrontendNlbTargetGroupHealthCheckConfig(ctx context.Context, svcAndIngAnnotations map[string]string, tgProtocol elbv2model.Protocol) (elbv2model.TargetGroupHealthCheckConfig, map[string]bool, error) { + isExplicit := make(map[string]bool) + + healthCheckPort, portExplicit, err := t.buildFrontendNlbTargetGroupHealthCheckPort(ctx, svcAndIngAnnotations) + if err != nil { + return elbv2model.TargetGroupHealthCheckConfig{}, nil, err + } + isExplicit["Port"] = portExplicit + + healthCheckProtocol, protocolExplicit, err := t.buildFrontendNlbTargetGroupHealthCheckProtocol(ctx, svcAndIngAnnotations, "HTTP") + if err != nil { + return elbv2model.TargetGroupHealthCheckConfig{}, nil, err + } + isExplicit["Protocol"] = protocolExplicit + + healthCheckPath, pathExplicit := t.buildFrontendNlbTargetGroupHealthCheckPath(ctx, svcAndIngAnnotations) + isExplicit["Path"] = pathExplicit + + healthCheckMatcher, matcherExplicit := t.buildFrontendNlbTargetGroupHealthCheckMatcher(ctx, svcAndIngAnnotations, elbv2model.ProtocolVersionHTTP1) + isExplicit["Matcher"] = matcherExplicit + + healthCheckIntervalSeconds, intervalExplicit, err := t.buildFrontendNlbTargetGroupHealthCheckIntervalSeconds(ctx, svcAndIngAnnotations) + if err != nil { + return elbv2model.TargetGroupHealthCheckConfig{}, nil, err + } + isExplicit["IntervalSeconds"] = intervalExplicit + + healthCheckTimeoutSeconds, timeoutExplicit, err := t.buildFrontendNlbTargetGroupHealthCheckTimeoutSeconds(ctx, svcAndIngAnnotations) + if err != nil { + return elbv2model.TargetGroupHealthCheckConfig{}, nil, err + } + isExplicit["TimeoutSeconds"] = timeoutExplicit + + healthCheckHealthyThresholdCount, healthyThresholdExplicit, err := t.buildFrontendNlbTargetGroupHealthCheckHealthyThresholdCount(ctx, svcAndIngAnnotations) + if err != nil { + return elbv2model.TargetGroupHealthCheckConfig{}, nil, err + } + isExplicit["HealthyThresholdCount"] = healthyThresholdExplicit + + healthCheckUnhealthyThresholdCount, 
unhealthyThresholdExplicit, err := t.buildFrontendNlbTargetGroupHealthCheckUnhealthyThresholdCount(ctx, svcAndIngAnnotations) + if err != nil { + return elbv2model.TargetGroupHealthCheckConfig{}, nil, err + } + isExplicit["UnhealthyThresholdCount"] = unhealthyThresholdExplicit + + return elbv2model.TargetGroupHealthCheckConfig{ + Port: &healthCheckPort, + Protocol: healthCheckProtocol, + Path: &healthCheckPath, + Matcher: &healthCheckMatcher, + IntervalSeconds: awssdk.Int32(healthCheckIntervalSeconds), + TimeoutSeconds: awssdk.Int32(healthCheckTimeoutSeconds), + HealthyThresholdCount: awssdk.Int32(healthCheckHealthyThresholdCount), + UnhealthyThresholdCount: awssdk.Int32(healthCheckUnhealthyThresholdCount), + }, isExplicit, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbTargetGroupHealthCheckPort(_ context.Context, svcAndIngAnnotations map[string]string) (intstr.IntOrString, bool, error) { + rawHealthCheckPort := "" + exists := t.annotationParser.ParseStringAnnotation(annotations.IngressSuffixFrontendNlbHealthCheckPort, &rawHealthCheckPort, svcAndIngAnnotations) + if !exists { + return intstr.FromString(healthCheckPortTrafficPort), false, nil + } + if rawHealthCheckPort == healthCheckPortTrafficPort { + return intstr.FromString(healthCheckPortTrafficPort), true, nil + } + healthCheckPort := intstr.Parse(rawHealthCheckPort) + if healthCheckPort.Type == intstr.Int { + return healthCheckPort, true, nil + } + + return intstr.IntOrString{}, true, errors.New("failed to resolve healthCheckPort") +} + +func (t *defaultModelBuildTask) buildFrontendNlbTargetGroupHealthCheckProtocol(_ context.Context, svcAndIngAnnotations map[string]string, tgProtocol elbv2model.Protocol) (elbv2model.Protocol, bool, error) { + rawHealthCheckProtocol := string(tgProtocol) + exists := t.annotationParser.ParseStringAnnotation(annotations.IngressSuffixFrontendNlbHealthCheckProtocol, &rawHealthCheckProtocol, svcAndIngAnnotations) + switch rawHealthCheckProtocol { + case 
string(elbv2model.ProtocolHTTP): + return elbv2model.ProtocolHTTP, exists, nil + case string(elbv2model.ProtocolHTTPS): + return elbv2model.ProtocolHTTPS, exists, nil + default: + return "", exists, errors.Errorf("healthCheckProtocol must be within [%v, %v]", elbv2model.ProtocolHTTP, elbv2model.ProtocolHTTPS) + } +} + +func (t *defaultModelBuildTask) buildFrontendNlbTargetGroupHealthCheckPath(_ context.Context, svcAndIngAnnotations map[string]string) (string, bool) { + rawHealthCheckPath := t.defaultHealthCheckPathHTTP + exists := t.annotationParser.ParseStringAnnotation(annotations.IngressSuffixFrontendNlbHealthCheckPath, &rawHealthCheckPath, svcAndIngAnnotations) + return rawHealthCheckPath, exists +} + +func (t *defaultModelBuildTask) buildFrontendNlbTargetGroupHealthCheckMatcher(_ context.Context, svcAndIngAnnotations map[string]string, tgProtocolVersion elbv2model.ProtocolVersion) (elbv2model.HealthCheckMatcher, bool) { + rawHealthCheckMatcherHTTPCode := t.defaultHealthCheckMatcherHTTPCode + exists := t.annotationParser.ParseStringAnnotation(annotations.IngressSuffixFrontendNlbHealthCheckSuccessCodes, &rawHealthCheckMatcherHTTPCode, svcAndIngAnnotations) + return elbv2model.HealthCheckMatcher{ + HTTPCode: &rawHealthCheckMatcherHTTPCode, + }, exists +} + +func (t *defaultModelBuildTask) buildFrontendNlbTargetGroupHealthCheckIntervalSeconds(_ context.Context, svcAndIngAnnotations map[string]string) (int32, bool, error) { + rawHealthCheckIntervalSeconds := t.defaultHealthCheckIntervalSeconds + exists, err := t.annotationParser.ParseInt32Annotation(annotations.IngressSuffixFrontendNlbHealthCheckIntervalSeconds, &rawHealthCheckIntervalSeconds, svcAndIngAnnotations) + if err != nil { + return 0, false, err + } + return rawHealthCheckIntervalSeconds, exists, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbTargetGroupHealthCheckTimeoutSeconds(_ context.Context, svcAndIngAnnotations map[string]string) (int32, bool, error) { + rawHealthCheckTimeoutSeconds := 
t.defaultHealthCheckTimeoutSeconds + exists, err := t.annotationParser.ParseInt32Annotation(annotations.IngressSuffixFrontendNlbHealthCheckTimeoutSeconds, &rawHealthCheckTimeoutSeconds, svcAndIngAnnotations) + if err != nil { + return 0, false, err + } + return rawHealthCheckTimeoutSeconds, exists, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbTargetGroupHealthCheckHealthyThresholdCount(_ context.Context, svcAndIngAnnotations map[string]string) (int32, bool, error) { + rawHealthCheckHealthyThresholdCount := t.defaultHealthCheckHealthyThresholdCount + exists, err := t.annotationParser.ParseInt32Annotation(annotations.IngressSuffixFrontendNlbHealthCheckHealthyThresholdCount, + &rawHealthCheckHealthyThresholdCount, svcAndIngAnnotations) + if err != nil { + return 0, false, err + } + return rawHealthCheckHealthyThresholdCount, exists, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbTargetGroupHealthCheckUnhealthyThresholdCount(_ context.Context, svcAndIngAnnotations map[string]string) (int32, bool, error) { + rawHealthCheckUnhealthyThresholdCount := t.defaultHealthCheckUnhealthyThresholdCount + exists, err := t.annotationParser.ParseInt32Annotation(annotations.IngressSuffixFrontendNlHealthCheckbUnhealthyThresholdCount, + &rawHealthCheckUnhealthyThresholdCount, svcAndIngAnnotations) + if err != nil { + return 0, false, err + } + return rawHealthCheckUnhealthyThresholdCount, exists, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbTargetGroupSpec(ctx context.Context, tgProtocol elbv2model.Protocol, + port int32, healthCheckConfig *elbv2model.TargetGroupHealthCheckConfig) (elbv2model.TargetGroupSpec, error) { + + tgName := t.buildFrontendNlbTargetGroupName(ctx, port, elbv2model.TargetTypeALB, tgProtocol, healthCheckConfig) + + return elbv2model.TargetGroupSpec{ + Name: tgName, + TargetType: elbv2model.TargetTypeALB, + Port: awssdk.Int32(port), + Protocol: tgProtocol, + IPAddressType: 
elbv2model.TargetGroupIPAddressType(t.loadBalancer.Spec.IPAddressType), + HealthCheckConfig: healthCheckConfig, + }, nil +} + +func (t *defaultModelBuildTask) buildFrontendNlbTargetGroupName(_ context.Context, tgPort int32, + targetType elbv2model.TargetType, tgProtocol elbv2model.Protocol, hc *elbv2model.TargetGroupHealthCheckConfig) string { + uuidHash := sha256.New() + _, _ = uuidHash.Write([]byte(t.clusterName)) + _, _ = uuidHash.Write([]byte(t.ingGroup.ID.String())) + _, _ = uuidHash.Write([]byte(strconv.Itoa(int(tgPort)))) + _, _ = uuidHash.Write([]byte(targetType)) + _, _ = uuidHash.Write([]byte(tgProtocol)) + _, _ = uuidHash.Write([]byte(hc.Port.String())) + _, _ = uuidHash.Write([]byte(hc.Protocol)) + uuid := hex.EncodeToString(uuidHash.Sum(nil)) + + if t.ingGroup.ID.IsExplicit() { + sanitizedName := invalidLoadBalancerNamePattern.ReplaceAllString(t.ingGroup.ID.Name, "") + return fmt.Sprintf("k8s-%.17s-%.10s", sanitizedName, uuid) + } + + sanitizedNamespace := invalidTargetGroupNamePattern.ReplaceAllString(t.ingGroup.ID.Namespace, "") + sanitizedName := invalidTargetGroupNamePattern.ReplaceAllString(t.ingGroup.ID.Name, "") + return fmt.Sprintf("k8s-%.8s-%.8s-%.10s", sanitizedNamespace, sanitizedName, uuid) +} + +func (t *defaultModelBuildTask) buildFrontendNlbSchemeViaAnnotation(ctx context.Context, alb *elbv2model.LoadBalancer) (elbv2model.LoadBalancerScheme, bool, error) { + explicitSchemes := sets.Set[string]{} + for _, member := range t.ingGroup.Members { + rawSchema := "" + if exists := t.annotationParser.ParseStringAnnotation(annotations.IngressSuffixFrontendNlbScheme, &rawSchema, member.Ing.Annotations); !exists { + continue + } + explicitSchemes.Insert(rawSchema) + } + if len(explicitSchemes) == 0 { + return alb.Spec.Scheme, false, nil + } + if len(explicitSchemes) > 1 { + return "", true, errors.Errorf("conflicting scheme: %v", explicitSchemes) + } + rawScheme, _ := explicitSchemes.PopAny() + switch rawScheme { + case 
string(elbv2model.LoadBalancerSchemeInternetFacing):
+		return elbv2model.LoadBalancerSchemeInternetFacing, true, nil
+	case string(elbv2model.LoadBalancerSchemeInternal):
+		return elbv2model.LoadBalancerSchemeInternal, true, nil
+	default:
+		return "", false, errors.Errorf("unknown scheme: %v", rawScheme)
+	}
+}
+
+// buildFrontendNlbResourceID builds the stack resource ID for a frontend NLB
+// sub-resource. When both protocol and port are known the ID is qualified as
+// "FrontendNlb-<type>-<protocol>-<port>"; otherwise the bare "FrontendNlb" ID
+// is used.
+func buildFrontendNlbResourceID(resourceType string, protocol elbv2model.Protocol, port *int32) string {
+	if port != nil && protocol != "" {
+		return fmt.Sprintf("FrontendNlb-%s-%v-%v", resourceType, protocol, *port)
+	}
+	return "FrontendNlb"
+}
+
+// mergeHealthCheckField merges one health-check field across per-ingress
+// listener configs. An explicitly annotated value wins over a default, and two
+// different explicit values across ingresses are a conflict error. explicit
+// marks fields annotated on the current config; explicitFields accumulates
+// fields already merged from an explicit annotation; configIndex is reported
+// 1-based in the conflict error.
+func mergeHealthCheckField[T comparable](fieldName string, finalValue **T, currentValue *T, explicit map[string]bool, explicitFields map[string]bool, configIndex int) error {
+	if explicit[fieldName] {
+		if explicitFields[fieldName] {
+			if **finalValue != *currentValue {
+				return errors.Errorf("conflicting %s, config %d: %v, previous: %v", fieldName, configIndex+1, *currentValue, **finalValue)
+			}
+		} else {
+			*finalValue = currentValue
+			explicitFields[fieldName] = true
+		}
+	} else if !explicitFields[fieldName] {
+		*finalValue = currentValue
+	}
+	return nil
+}
diff --git a/pkg/ingress/model_build_frontend_nlb_test.go b/pkg/ingress/model_build_frontend_nlb_test.go
new file mode 100644
index 0000000000..ef7c740957
--- /dev/null
+++ b/pkg/ingress/model_build_frontend_nlb_test.go
@@ -0,0 +1,966 @@
+package ingress
+
+import (
+	"context"
+	"testing"
+
+	awssdk "github.com/aws/aws-sdk-go-v2/aws"
+	ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types"
+	"github.com/go-logr/logr"
+	gomock "github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/assert"
+	networking "k8s.io/api/networking/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/aws-load-balancer-controller/pkg/annotations" + "sigs.k8s.io/aws-load-balancer-controller/pkg/aws/services" + "sigs.k8s.io/aws-load-balancer-controller/pkg/model/core" + "sigs.k8s.io/aws-load-balancer-controller/pkg/model/elbv2" + elbv2model "sigs.k8s.io/aws-load-balancer-controller/pkg/model/elbv2" + networking2 "sigs.k8s.io/aws-load-balancer-controller/pkg/networking" + networkingpkg "sigs.k8s.io/aws-load-balancer-controller/pkg/networking" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +func Test_defaultModelBuildTask_buildFrontendNlbSecurityGroups(t *testing.T) { + type describeSecurityGroupsResult struct { + securityGroups []ec2types.SecurityGroup + err error + } + + type fields struct { + ingGroup Group + scheme elbv2.LoadBalancerScheme + describeSecurityGroupsResult []describeSecurityGroupsResult + } + + tests := []struct { + name string + fields fields + wantSGTokens []core.StringToken + wantErr string + }{ + { + name: "with no annotations", + fields: fields{ + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-1", + Annotations: map[string]string{}, + }, + }, + }, + }, + }, + scheme: elbv2.LoadBalancerSchemeInternal, + }, + }, + { + name: "with annotations", + fields: fields{ + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-1", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/frontend-nlb-security-groups": "sg-manual", + }, + }, + }, + }, + }, + }, + scheme: elbv2.LoadBalancerSchemeInternal, + describeSecurityGroupsResult: []describeSecurityGroupsResult{ + { + securityGroups: []ec2types.SecurityGroup{ + { + GroupId: awssdk.String("sg-manual"), + }, + }, + }, + }, + }, + wantSGTokens: 
[]core.StringToken{core.LiteralStringToken("sg-manual")}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockEC2 := services.NewMockEC2(ctrl) + vpcID := "vpc-dummy" + for _, res := range tt.fields.describeSecurityGroupsResult { + mockEC2.EXPECT().DescribeSecurityGroupsAsList(gomock.Any(), gomock.Any()).Return(res.securityGroups, res.err) + } + sgResolver := networkingpkg.NewDefaultSecurityGroupResolver(mockEC2, vpcID) + + annotationParser := annotations.NewSuffixAnnotationParser("alb.ingress.kubernetes.io") + task := &defaultModelBuildTask{ + sgResolver: sgResolver, + ingGroup: tt.fields.ingGroup, + annotationParser: annotationParser, + } + + got, err := task.buildFrontendNlbSecurityGroups(context.Background()) + + if err != nil { + assert.EqualError(t, err, tt.wantErr) + } else { + + var gotSGTokens []core.StringToken + for _, sgToken := range got { + gotSGTokens = append(gotSGTokens, sgToken) + } + assert.Equal(t, tt.wantSGTokens, gotSGTokens) + } + }) + } +} + +func Test_buildFrontendNlbSubnetMappings(t *testing.T) { + + type fields struct { + ingGroup Group + scheme elbv2.LoadBalancerScheme + } + + tests := []struct { + name string + fields fields + want []string + wantErr string + }{ + { + name: "no annotation implicit subnets", + fields: fields{ + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-1", + Annotations: map[string]string{}, + }, + }, + }, + }, + }, + scheme: elbv2.LoadBalancerSchemeInternal, + }, + }, + { + name: "with subnets annoattion", + fields: fields{ + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-2", + 
Annotations: map[string]string{ + "alb.ingress.kubernetes.io/frontend-nlb-subnets": "subnet-1,subnet-2", + }, + }, + }, + }, + }, + }, + scheme: elbv2.LoadBalancerSchemeInternal, + }, + want: []string{"subnet-1", "subnet-2"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockEC2 := services.NewMockEC2(ctrl) + mockEC2.EXPECT().DescribeSubnetsAsList(gomock.Any(), gomock.Any()). + DoAndReturn(stubDescribeSubnetsAsList). + AnyTimes() + + azInfoProvider := networking2.NewMockAZInfoProvider(ctrl) + azInfoProvider.EXPECT().FetchAZInfos(gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, availabilityZoneIDs []string) (map[string]ec2types.AvailabilityZone, error) { + ret := make(map[string]ec2types.AvailabilityZone, len(availabilityZoneIDs)) + for _, id := range availabilityZoneIDs { + ret[id] = ec2types.AvailabilityZone{ZoneType: awssdk.String("availability-zone")} + } + return ret, nil + }).AnyTimes() + + subnetsResolver := networking2.NewDefaultSubnetsResolver( + azInfoProvider, + mockEC2, + "vpc-1", + "test-cluster", + true, + true, + true, + logr.New(&log.NullLogSink{}), + ) + + annotationParser := annotations.NewSuffixAnnotationParser("alb.ingress.kubernetes.io") + task := &defaultModelBuildTask{ + ingGroup: tt.fields.ingGroup, + annotationParser: annotationParser, + subnetsResolver: subnetsResolver, + } + got, err := task.buildFrontendNlbSubnetMappings(context.Background(), tt.fields.scheme) + + if err != nil { + assert.EqualError(t, err, tt.wantErr) + } else { + + var gotSubnets []string + for _, mapping := range got { + gotSubnets = append(gotSubnets, mapping.SubnetID) + } + assert.Equal(t, tt.want, gotSubnets) + } + }) + } +} + +func Test_buildFrontendNlbName(t *testing.T) { + tests := []struct { + name string + clusterName string + namespace string + ingName string + scheme elbv2model.LoadBalancerScheme + wantPrefix string + alb *elbv2model.LoadBalancer + 
}{ + { + name: "standard case", + clusterName: "test-cluster", + namespace: "awesome-ns", + ingName: "my-ingress", + scheme: elbv2model.LoadBalancerSchemeInternal, + wantPrefix: "test-alb", + alb: &elbv2model.LoadBalancer{ + Spec: elbv2model.LoadBalancerSpec{ + Name: "test-alb", + }, + }, + }, + { + name: "with special characters", + clusterName: "test-cluster", + namespace: "awesome-ns", + ingName: "my-ingress-1", + scheme: elbv2model.LoadBalancerSchemeInternal, + wantPrefix: "k8s-awesomen-myingr", + alb: &elbv2model.LoadBalancer{ + Spec: elbv2model.LoadBalancerSpec{}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + task := &defaultModelBuildTask{ + clusterName: tt.clusterName, + ingGroup: Group{ + ID: GroupID{ + Namespace: tt.namespace, + Name: tt.ingName, + }, + }, + } + + got, err := task.buildFrontendNlbName(context.Background(), tt.scheme, tt.alb) + assert.NoError(t, err) + assert.Contains(t, got, tt.wantPrefix) + + }) + } +} + +func Test_buildFrontendNLBTargetGroupName(t *testing.T) { + tests := []struct { + name string + clusterName string + namespace string + ingName string + port int32 + svcPort intstr.IntOrString + targetType elbv2model.TargetType + protocol elbv2model.Protocol + wantPrefix string + }{ + { + name: "standard case", + clusterName: "test-cluster", + namespace: "default", + ingName: "my-ingress", + port: 80, + svcPort: intstr.FromInt(80), + targetType: "alb", + protocol: elbv2model.ProtocolTCP, + wantPrefix: "k8s-default-mying", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + task := &defaultModelBuildTask{ + clusterName: tt.clusterName, + loadBalancer: &elbv2model.LoadBalancer{ + Spec: elbv2model.LoadBalancerSpec{ + Scheme: elbv2.LoadBalancerSchemeInternetFacing, + }, + }, + ingGroup: Group{ + ID: GroupID{ + Namespace: tt.namespace, + Name: tt.ingName, + }, + }, + } + + port80 := intstr.FromInt(80) + + healthCheckConfig := &elbv2model.TargetGroupHealthCheckConfig{ + 
Protocol: elbv2model.ProtocolTCP, + Port: &port80, + } + + got := task.buildFrontendNlbTargetGroupName( + context.Background(), + tt.port, + tt.targetType, + tt.protocol, + healthCheckConfig, + ) + + assert.Contains(t, got, tt.wantPrefix) + + }) + } +} + +func Test_buildFrontendNlbSchemeViaAnnotation(t *testing.T) { + tests := []struct { + name string + annotations map[string]string + defaultScheme elbv2model.LoadBalancerScheme + wantScheme elbv2model.LoadBalancerScheme + wantExplicit bool + wantErr bool + }{ + { + name: "explicit internal scheme", + annotations: map[string]string{ + "alb.ingress.kubernetes.io/frontend-nlb-scheme": "internal", + }, + defaultScheme: elbv2model.LoadBalancerSchemeInternetFacing, + wantScheme: elbv2model.LoadBalancerSchemeInternal, + wantExplicit: true, + wantErr: false, + }, + { + name: "explicit internet-facing scheme", + annotations: map[string]string{ + "alb.ingress.kubernetes.io/frontend-nlb-scheme": "internet-facing", + }, + defaultScheme: elbv2model.LoadBalancerSchemeInternal, + wantScheme: elbv2model.LoadBalancerSchemeInternetFacing, + wantExplicit: true, + wantErr: false, + }, + { + name: "no annotation - use default", + annotations: map[string]string{}, + defaultScheme: elbv2model.LoadBalancerSchemeInternal, + wantScheme: elbv2model.LoadBalancerSchemeInternal, + wantExplicit: false, + wantErr: false, + }, + { + name: "invalid scheme", + annotations: map[string]string{ + "alb.ingress.kubernetes.io/frontend-nlb-scheme": "invalid", + }, + defaultScheme: elbv2model.LoadBalancerSchemeInternal, + wantScheme: "", + wantExplicit: false, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ing := &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: tt.annotations, + }, + } + + task := &defaultModelBuildTask{ + ingGroup: Group{ + Members: []ClassifiedIngress{ + { + Ing: ing, + }, + }, + }, + annotationParser: annotations.NewSuffixAnnotationParser("alb.ingress.kubernetes.io"), 
+ } + + alb := &elbv2model.LoadBalancer{ + Spec: elbv2model.LoadBalancerSpec{ + Scheme: tt.defaultScheme, + }, + } + + gotScheme, gotExplicit, err := task.buildFrontendNlbSchemeViaAnnotation(context.Background(), alb) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.wantScheme, gotScheme) + assert.Equal(t, tt.wantExplicit, gotExplicit) + } + }) + } +} + +func Test_buildEnableFrontendNlbViaAnnotation(t *testing.T) { + + type fields struct { + ingGroup Group + } + + tests := []struct { + name string + fields fields + wantEnabled bool + wantErr bool + expectedErrMsg string + }{ + { + name: "single ingress with enable annotation true", + fields: fields{ + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-1", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/enable-frontend-nlb": "true", + }, + }, + }, + }, + }, + }, + }, + wantEnabled: true, + wantErr: false, + }, + { + name: "single ingress without annotation", + fields: fields{ + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-1", + Annotations: map[string]string{}, + }, + }, + }, + }, + }, + }, + wantEnabled: false, + wantErr: false, + }, + { + name: "multiple ingresses with same enable value", + fields: fields{ + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-1", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/enable-frontend-nlb": "true", + }, + }, + }, + }, + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + 
Namespace: "awesome-ns", + Name: "ing-2", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/enable-frontend-nlb": "true", + }, + }, + }, + }, + }, + }, + }, + wantEnabled: true, + wantErr: false, + }, + { + name: "multiple ingresses with conflicting enable values", + fields: fields{ + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-1", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/enable-frontend-nlb": "true", + }, + }, + }, + }, + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-2", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/enable-frontend-nlb": "false", + }, + }, + }, + }, + }, + }, + }, + wantEnabled: false, + wantErr: true, + expectedErrMsg: "conflicting enable frontend NLB", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + task := &defaultModelBuildTask{ + ingGroup: tt.fields.ingGroup, + annotationParser: annotations.NewSuffixAnnotationParser("alb.ingress.kubernetes.io"), + } + + got, err := task.buildEnableFrontendNlbViaAnnotation(context.Background()) + + if tt.wantErr { + assert.Error(t, err) + if tt.expectedErrMsg != "" { + assert.Contains(t, err.Error(), tt.expectedErrMsg) + } + return + } + + assert.NoError(t, err) + assert.Equal(t, tt.wantEnabled, got) + }) + } +} + +func Test_mergeFrontendNlbListenPortConfigs(t *testing.T) { + tests := []struct { + name string + configs []FrontendNlbListenConfigWithIngress + expectedConfig FrontendNlbListenerConfig + wantErr bool + expectedErrMsg string + }{ + { + name: "valid config with no conflicts", + configs: []FrontendNlbListenConfigWithIngress{ + { + ingKey: types.NamespacedName{Namespace: "awesome-ns", Name: "ingress-1"}, + 
FrontendNlbListenerConfig: FrontendNlbListenerConfig{ + Port: 80, + Protocol: elbv2model.ProtocolTCP, + TargetPort: 80, + HealthCheckConfig: elbv2model.TargetGroupHealthCheckConfig{ + IntervalSeconds: awssdk.Int32(10), + TimeoutSeconds: awssdk.Int32(5), + }, + }, + }, + }, + expectedConfig: FrontendNlbListenerConfig{ + Port: 80, + Protocol: elbv2model.ProtocolTCP, + TargetPort: 80, + HealthCheckConfig: elbv2model.TargetGroupHealthCheckConfig{ + IntervalSeconds: awssdk.Int32(10), + TimeoutSeconds: awssdk.Int32(5), + }, + }, + wantErr: false, + }, + { + name: "conflicting health check interval", + configs: []FrontendNlbListenConfigWithIngress{ + { + ingKey: types.NamespacedName{Namespace: "awesome-ns", Name: "ingress-1"}, + FrontendNlbListenerConfig: FrontendNlbListenerConfig{ + Port: 80, + Protocol: elbv2model.ProtocolTCP, + HealthCheckConfig: elbv2model.TargetGroupHealthCheckConfig{ + IntervalSeconds: awssdk.Int32(10), + }, + HealthCheckConfigExplicit: map[string]bool{ + "IntervalSeconds": true, + }, + }, + }, + { + ingKey: types.NamespacedName{Namespace: "awesome-ns", Name: "ingress-2"}, + FrontendNlbListenerConfig: FrontendNlbListenerConfig{ + Port: 80, + Protocol: elbv2model.ProtocolTCP, + HealthCheckConfig: elbv2model.TargetGroupHealthCheckConfig{ + IntervalSeconds: awssdk.Int32(15), + }, + HealthCheckConfigExplicit: map[string]bool{ + "IntervalSeconds": true, + }, + }, + }, + }, + wantErr: true, + expectedErrMsg: "conflicting IntervalSeconds", + }, + { + name: "valid multiple configs with same values", + configs: []FrontendNlbListenConfigWithIngress{ + { + ingKey: types.NamespacedName{Namespace: "awesome-ns", Name: "ingress-1"}, + FrontendNlbListenerConfig: FrontendNlbListenerConfig{ + Port: 80, + Protocol: elbv2model.ProtocolTCP, + HealthCheckConfig: elbv2model.TargetGroupHealthCheckConfig{ + IntervalSeconds: awssdk.Int32(10), + TimeoutSeconds: awssdk.Int32(5), + }, + TargetPort: 80, + HealthCheckConfigExplicit: map[string]bool{ + "IntervalSeconds": true, + 
"TimeoutSeconds": true, + }, + }, + }, + { + ingKey: types.NamespacedName{Namespace: "awesome-ns", Name: "ingress-2"}, + FrontendNlbListenerConfig: FrontendNlbListenerConfig{ + Port: 80, + Protocol: elbv2model.ProtocolTCP, + HealthCheckConfig: elbv2model.TargetGroupHealthCheckConfig{ + IntervalSeconds: awssdk.Int32(10), + TimeoutSeconds: awssdk.Int32(5), + }, + TargetPort: 80, + HealthCheckConfigExplicit: map[string]bool{ + "IntervalSeconds": true, + "TimeoutSeconds": true, + }, + }, + }, + }, + expectedConfig: FrontendNlbListenerConfig{ + Port: 80, + Protocol: elbv2model.ProtocolTCP, + HealthCheckConfig: elbv2model.TargetGroupHealthCheckConfig{ + IntervalSeconds: awssdk.Int32(10), + TimeoutSeconds: awssdk.Int32(5), + }, + TargetPort: 80, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + task := &defaultModelBuildTask{} + got, err := task.mergeFrontendNlbListenPortConfigs(context.Background(), tt.configs) + + if tt.wantErr { + assert.Error(t, err) + if tt.expectedErrMsg != "" { + assert.Contains(t, err.Error(), tt.expectedErrMsg) + } + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expectedConfig, got) + } + }) + } +} + +func Test_defaultModelBuildTask_buildFrontendNlbListeners(t *testing.T) { + tests := []struct { + name string + ingGroup Group + albListenerPorts []int32 + listenerPortConfigByIngress map[types.NamespacedName]map[int32]listenPortConfig + loadBalancer *elbv2model.LoadBalancer + wantErr bool + expectedErrMsg string + }{ + { + name: "valid listener configurations", + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-1", + Annotations: map[string]string{"alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping": "80=443"}, + }, + }, + }, + }, + }, + listenerPortConfigByIngress: 
map[types.NamespacedName]map[int32]listenPortConfig{ + {Namespace: "awesome-ns", Name: "ing-1"}: { + 443: listenPortConfig{}, + }, + }, + loadBalancer: &elbv2model.LoadBalancer{ + Spec: elbv2model.LoadBalancerSpec{ + IPAddressType: elbv2model.IPAddressTypeIPV4, + }, + }, + wantErr: false, + }, + { + name: "conflicting listener configurations", + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-2", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping": "80=443", + }, + }, + }}, + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-3", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping": "80=8443", + }}, + }, + }, + }, + }, + listenerPortConfigByIngress: map[types.NamespacedName]map[int32]listenPortConfig{ + {Namespace: "awesome-ns", Name: "ing-2"}: { + 443: listenPortConfig{}, + }, + {Namespace: "awesome-ns", Name: "ing-3"}: { + 8443: listenPortConfig{}, + }, + }, + wantErr: true, + expectedErrMsg: "failed to merge NLB listenPort config for port: 80", + }, + { + name: "valid listener configurations for multiple ingress", + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-4", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping": "80=443", + }, + }, + }}, + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-5", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping": "8080=80", + }}, + }, + }, + }, + }, + listenerPortConfigByIngress: 
map[types.NamespacedName]map[int32]listenPortConfig{ + {Namespace: "awesome-ns", Name: "ing-4"}: { + 443: listenPortConfig{}, + }, + {Namespace: "awesome-ns", Name: "ing-5"}: { + 80: listenPortConfig{}, + }, + }, + loadBalancer: &elbv2model.LoadBalancer{ + Spec: elbv2model.LoadBalancerSpec{ + IPAddressType: elbv2model.IPAddressTypeIPV4, + }, + }, + wantErr: false, + }, + { + name: "valid listener configurations for multiple ingress, 8443 is ingnored", + ingGroup: Group{ + ID: GroupID{ + Namespace: "awesome-ns", + Name: "my-ingress", + }, + Members: []ClassifiedIngress{ + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-6", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping": "80=443", + }, + }, + }}, + { + Ing: &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "awesome-ns", + Name: "ing-7", + Annotations: map[string]string{ + "alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping": "80=8443", + }}, + }, + }, + }, + }, + listenerPortConfigByIngress: map[types.NamespacedName]map[int32]listenPortConfig{ + {Namespace: "awesome-ns", Name: "ing-6"}: { + 443: listenPortConfig{}, + }, + {Namespace: "awesome-ns", Name: "ing-7"}: {}, + }, + loadBalancer: &elbv2model.LoadBalancer{ + Spec: elbv2model.LoadBalancerSpec{ + IPAddressType: elbv2model.IPAddressTypeIPV4, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + stack := core.NewDefaultStack(core.StackID{Name: "awesome-stack"}) + desiredState := &core.FrontendNlbTargetGroupDesiredState{ + TargetGroups: make(map[string]*core.FrontendNlbTargetGroupState), + } + mockLoadBalancer := elbv2model.NewLoadBalancer(stack, "FrontendNlb", elbv2model.LoadBalancerSpec{ + IPAddressType: elbv2model.IPAddressTypeIPV4, + }) + + task := &defaultModelBuildTask{ + ingGroup: tt.ingGroup, + annotationParser: 
annotations.NewSuffixAnnotationParser("alb.ingress.kubernetes.io"), + loadBalancer: tt.loadBalancer, + frontendNlb: mockLoadBalancer, + stack: stack, + frontendNlbTargetGroupDesiredState: desiredState, + } + + err := task.buildFrontendNlbListeners(context.Background(), tt.listenerPortConfigByIngress) + + if tt.wantErr { + assert.Error(t, err) + if tt.expectedErrMsg != "" { + assert.Contains(t, err.Error(), tt.expectedErrMsg) + } + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/pkg/ingress/model_builder.go b/pkg/ingress/model_builder.go index 1e341f24c1..7273c3884a 100644 --- a/pkg/ingress/model_builder.go +++ b/pkg/ingress/model_builder.go @@ -37,7 +37,7 @@ const ( // ModelBuilder is responsible for build mode stack for a IngressGroup. type ModelBuilder interface { // build mode stack for a IngressGroup. - Build(ctx context.Context, ingGroup Group, metricsCollector lbcmetrics.MetricCollector) (core.Stack, *elbv2model.LoadBalancer, []types.NamespacedName, bool, error) + Build(ctx context.Context, ingGroup Group, metricsCollector lbcmetrics.MetricCollector) (core.Stack, *elbv2model.LoadBalancer, []types.NamespacedName, bool, *core.FrontendNlbTargetGroupDesiredState, *elbv2model.LoadBalancer, error) } // NewDefaultModelBuilder constructs new defaultModelBuilder. @@ -121,8 +121,10 @@ type defaultModelBuilder struct { } // build mode stack for a IngressGroup. 
-func (b *defaultModelBuilder) Build(ctx context.Context, ingGroup Group, metricsCollector lbcmetrics.MetricCollector) (core.Stack, *elbv2model.LoadBalancer, []types.NamespacedName, bool, error) { +func (b *defaultModelBuilder) Build(ctx context.Context, ingGroup Group, metricsCollector lbcmetrics.MetricCollector) (core.Stack, *elbv2model.LoadBalancer, []types.NamespacedName, bool, *core.FrontendNlbTargetGroupDesiredState, *elbv2model.LoadBalancer, error) { stack := core.NewDefaultStack(core.StackID(ingGroup.ID)) + frontendNlbTargetGroupDesiredState := core.NewFrontendNlbTargetGroupDesiredState() + task := &defaultModelBuildTask{ k8sClient: b.k8sClient, eventRecorder: b.eventRecorder, @@ -148,8 +150,9 @@ func (b *defaultModelBuilder) Build(ctx context.Context, ingGroup Group, metrics enableIPTargetType: b.enableIPTargetType, metricsCollector: b.metricsCollector, - ingGroup: ingGroup, - stack: stack, + ingGroup: ingGroup, + stack: stack, + frontendNlbTargetGroupDesiredState: frontendNlbTargetGroupDesiredState, defaultTags: b.defaultTags, externalManagedTags: b.externalManagedTags, @@ -169,13 +172,14 @@ func (b *defaultModelBuilder) Build(ctx context.Context, ingGroup Group, metrics defaultHealthCheckMatcherGRPCCode: "12", loadBalancer: nil, + frontendNlb: nil, tgByResID: make(map[string]*elbv2model.TargetGroup), backendServices: make(map[types.NamespacedName]*corev1.Service), } if err := task.run(ctx); err != nil { - return nil, nil, nil, false, err + return nil, nil, nil, false, nil, nil, err } - return task.stack, task.loadBalancer, task.secretKeys, task.backendSGAllocated, nil + return task.stack, task.loadBalancer, task.secretKeys, task.backendSGAllocated, frontendNlbTargetGroupDesiredState, task.frontendNlb, nil } // the default model build task @@ -226,10 +230,12 @@ type defaultModelBuildTask struct { defaultHealthCheckMatcherHTTPCode string defaultHealthCheckMatcherGRPCCode string - loadBalancer *elbv2model.LoadBalancer - tgByResID 
map[string]*elbv2model.TargetGroup - backendServices map[types.NamespacedName]*corev1.Service - secretKeys []types.NamespacedName + loadBalancer *elbv2model.LoadBalancer + tgByResID map[string]*elbv2model.TargetGroup + backendServices map[types.NamespacedName]*corev1.Service + secretKeys []types.NamespacedName + frontendNlb *elbv2model.LoadBalancer + frontendNlbTargetGroupDesiredState *core.FrontendNlbTargetGroupDesiredState metricsCollector lbcmetrics.MetricCollector } @@ -250,6 +256,7 @@ func (t *defaultModelBuildTask) run(ctx context.Context) error { return nil } + listenerPortConfigByIngress := make(map[types.NamespacedName]map[int32]listenPortConfig) ingListByPort := make(map[int32][]ClassifiedIngress) listenPortConfigsByPort := make(map[int32][]listenPortConfigWithIngress) for _, member := range t.ingGroup.Members { @@ -258,6 +265,9 @@ func (t *defaultModelBuildTask) run(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "ingress: %v", ingKey.String()) } + + listenerPortConfigByIngress[ingKey] = listenPortConfigByPortForIngress + for port, cfg := range listenPortConfigByPortForIngress { ingListByPort[port] = append(ingListByPort[port], member) listenPortConfigsByPort[port] = append(listenPortConfigsByPort[port], listenPortConfigWithIngress{ @@ -299,6 +309,11 @@ func (t *defaultModelBuildTask) run(ctx context.Context) error { if err := t.buildLoadBalancerAddOns(ctx, lb.LoadBalancerARN()); err != nil { return errmetrics.NewErrorWithMetrics(controllerName, "build_load_balancer_addons", err, t.metricsCollector) } + + if err := t.buildFrontendNlbModel(ctx, lb, listenerPortConfigByIngress); err != nil { + return errmetrics.NewErrorWithMetrics(controllerName, "build_frontend_nlb", err, t.metricsCollector) + } + return nil } diff --git a/pkg/ingress/model_builder_test.go b/pkg/ingress/model_builder_test.go index 0c27e4743c..dfcd3a683d 100644 --- a/pkg/ingress/model_builder_test.go +++ b/pkg/ingress/model_builder_test.go @@ -3955,7 +3955,7 @@ func 
Test_defaultModelBuilder_Build(t *testing.T) { b.enableIPTargetType = *tt.enableIPTargetType } - gotStack, _, _, _, err := b.Build(context.Background(), tt.args.ingGroup, b.metricsCollector) + gotStack, _, _, _, _, _, err := b.Build(context.Background(), tt.args.ingGroup, b.metricsCollector) if tt.wantErr != "" { assert.EqualError(t, err, tt.wantErr) } else { diff --git a/pkg/model/core/frontend_nlb_target_group.go b/pkg/model/core/frontend_nlb_target_group.go new file mode 100644 index 0000000000..f83ef844b0 --- /dev/null +++ b/pkg/model/core/frontend_nlb_target_group.go @@ -0,0 +1,31 @@ +package core + +// FrontendNlbTargetGroupState represents the state of a single ALB Target Type target group with its ALB target +type FrontendNlbTargetGroupState struct { + Name string + ARN StringToken + Port int32 + TargetARN StringToken + TargetPort int32 +} + +// FrontendNlbTargetGroupDesiredState maintains a mapping of target groups targeting ALB +type FrontendNlbTargetGroupDesiredState struct { + TargetGroups map[string]*FrontendNlbTargetGroupState +} + +func NewFrontendNlbTargetGroupDesiredState() *FrontendNlbTargetGroupDesiredState { + return &FrontendNlbTargetGroupDesiredState{ + TargetGroups: make(map[string]*FrontendNlbTargetGroupState), + } +} + +func (m *FrontendNlbTargetGroupDesiredState) AddTargetGroup(targetGroupName string, targetGroupARN StringToken, targetARN StringToken, port int32, targetPort int32) { + m.TargetGroups[targetGroupName] = &FrontendNlbTargetGroupState{ + Name: targetGroupName, + ARN: targetGroupARN, + Port: port, + TargetARN: targetARN, + TargetPort: targetPort, + } +} diff --git a/pkg/model/elbv2/target_group.go b/pkg/model/elbv2/target_group.go index ffd6a3e12a..2d57fbd35e 100644 --- a/pkg/model/elbv2/target_group.go +++ b/pkg/model/elbv2/target_group.go @@ -2,6 +2,7 @@ package elbv2 import ( "context" + "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/aws-load-balancer-controller/pkg/model/core" @@ -55,6 +56,7 @@ 
type TargetType string const ( TargetTypeInstance TargetType = "instance" TargetTypeIP TargetType = "ip" + TargetTypeALB TargetType = "alb" ) type TargetGroupIPAddressType string diff --git a/test/e2e/ingress/utils.go b/test/e2e/ingress/utils.go index c536b8c156..609143e708 100644 --- a/test/e2e/ingress/utils.go +++ b/test/e2e/ingress/utils.go @@ -1,6 +1,11 @@ package ingress -import networking "k8s.io/api/networking/v1" +import ( + "fmt" + "strings" + + networking "k8s.io/api/networking/v1" +) func FindIngressDNSName(ing *networking.Ingress) string { for _, ingSTS := range ing.Status.LoadBalancer.Ingress { @@ -10,3 +15,17 @@ func FindIngressDNSName(ing *networking.Ingress) string { } return "" } + +func FindIngressTwoDNSName(ing *networking.Ingress) (albDNS string, nlbDNS string) { + for _, ingSTS := range ing.Status.LoadBalancer.Ingress { + if ingSTS.Hostname != "" { + fmt.Println("ingSTS.Hostname", ingSTS.Hostname) + if strings.Contains(ingSTS.Hostname, "elb.amazonaws.com") { + albDNS = ingSTS.Hostname + } else { + nlbDNS = ingSTS.Hostname + } + } + } + return albDNS, nlbDNS +} diff --git a/test/e2e/ingress/vanilla_ingress_test.go b/test/e2e/ingress/vanilla_ingress_test.go index a79a3e673d..327242bca5 100644 --- a/test/e2e/ingress/vanilla_ingress_test.go +++ b/test/e2e/ingress/vanilla_ingress_test.go @@ -828,6 +828,60 @@ var _ = Describe("vanilla ingress tests", func() { Body().Equal("Hello World!") }) }) + + Context("with frontend NLB enabled", func() { + It("should create a frontend NLB and route traffic correctly", func() { + appBuilder := manifest.NewFixedResponseServiceBuilder() + ingBuilder := manifest.NewIngressBuilder() + dp, svc := appBuilder.Build(sandboxNS.Name, "app", tf.Options.TestImageRegistry) + ingBackend := networking.IngressBackend{ + Service: &networking.IngressServiceBackend{ + Name: svc.Name, + Port: networking.ServiceBackendPort{ + Number: 80, + }, + }, + } + annotation := map[string]string{ + "kubernetes.io/ingress.class": "alb", + 
"alb.ingress.kubernetes.io/scheme": "internet-facing", + "alb.ingress.kubernetes.io/listen-ports": `[{"HTTP": 80}]`, + "alb.ingress.kubernetes.io/enable-frontend-nlb": "true", + "alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path": "/path", + "alb.ingress.kubernetes.io/frontend-nlb-scheme": "internet-facing", + } + + ing := ingBuilder. + AddHTTPRoute("", networking.HTTPIngressPath{Path: "/path", PathType: &exact, Backend: ingBackend}). + WithAnnotations(annotation).Build(sandboxNS.Name, "ing") + resStack := fixture.NewK8SResourceStack(tf, dp, svc, ing) + err := resStack.Setup(ctx) + Expect(err).NotTo(HaveOccurred()) + + defer resStack.TearDown(ctx) + + time.Sleep(6 * time.Minute) // Waiting 6 minutes for target registration and DNS propagation, and health check + + albARN, albDNS, nlbARN, nlbDNS := ExpectTwoLBProvisionedForIngress(ctx, tf, ing) + + // test alb traffic + ExpectLBDNSBeAvailable(ctx, tf, albARN, albDNS) + httpExp := httpexpect.New(tf.LoggerReporter, fmt.Sprintf("http://%v", albDNS)) + httpExp.GET("/path").Expect(). + Status(http.StatusOK). + Body().Equal("Hello World!") + + // test nlb traffic + ExpectLBDNSBeAvailable(ctx, tf, nlbARN, nlbDNS) + nlbHttpExp := httpexpect.New(tf.LoggerReporter, fmt.Sprintf("http://%v", nlbDNS)) + nlbHttpExp.GET("/path").Expect(). + Status(http.StatusOK). + Body().Equal("Hello World!") + + }) + + }) + }) // ExpectOneLBProvisionedForIngress expects one LoadBalancer provisioned for Ingress. @@ -847,6 +901,30 @@ func ExpectOneLBProvisionedForIngress(ctx context.Context, tf *framework.Framewo return lbARN, lbDNS } +// ExpectTwoLBProvisionedForIngress expects one ALB and one frontend NLB provisioned for the Ingress. 
+func ExpectTwoLBProvisionedForIngress(ctx context.Context, tf *framework.Framework, ing *networking.Ingress) (albARN string, albDNS string, nlbARN string, nlbDNS string) { + // Verify ALB is provisioned + Eventually(func(g Gomega) { + err := tf.K8sClient.Get(ctx, k8s.NamespacedName(ing), ing) + g.Expect(err).NotTo(HaveOccurred()) + albDNS, nlbDNS = FindIngressTwoDNSName(ing) + g.Expect(albDNS).ShouldNot(BeEmpty()) + g.Expect(nlbDNS).ShouldNot(BeEmpty()) + }, utils.IngressReconcileTimeout, utils.PollIntervalShort).Should(Succeed()) + tf.Logger.Info("ingress DNS populated", "dnsName", albDNS) + tf.Logger.Info("ingress DNS populated", "dnsName", nlbDNS) + + var err error + albARN, err = tf.LBManager.FindLoadBalancerByDNSName(ctx, albDNS) + Expect(err).ShouldNot(HaveOccurred()) + tf.Logger.Info("ALB provisioned", "arn", albARN) + + nlbARN, err = tf.LBManager.FindLoadBalancerByDNSName(ctx, nlbDNS) + Expect(err).ShouldNot(HaveOccurred()) + tf.Logger.Info("NLB provisioned", "arn", nlbARN) + return albARN, albDNS, nlbARN, nlbDNS +} + // ExpectNoLBProvisionedForIngress expects no LoadBalancer provisioned for Ingress. func ExpectNoLBProvisionedForIngress(ctx context.Context, tf *framework.Framework, ing *networking.Ingress) { Consistently(func(g Gomega) {