Enable frontend NLB #4126

Merged · 1 commit · Apr 18, 2025
3 changes: 2 additions & 1 deletion controllers/gateway/gateway_controller.go
@@ -3,6 +3,7 @@ package gateway
 import (
 	"context"
 	"fmt"
+
 	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
@@ -242,7 +243,7 @@ func (r *gatewayReconciler) reconcileUpdate(ctx context.Context, gw *gwv1.Gateway
 }

 func (r *gatewayReconciler) deployModel(ctx context.Context, gw *gwv1.Gateway, stack core.Stack) error {
-	if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, r.controllerName); err != nil {
+	if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, r.controllerName, nil); err != nil {
 		var requeueNeededAfter *runtime.RequeueNeededAfter
 		if errors.As(err, &requeueNeededAfter) {
 			return err
85 changes: 70 additions & 15 deletions controllers/ingress/group_controller.go
@@ -151,7 +151,7 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request)
 		return errmetrics.NewErrorWithMetrics(controllerName, "add_group_finalizer_error", err, r.metricsCollector)
 	}

-	_, lb, err := r.buildAndDeployModel(ctx, ingGroup)
+	_, lb, frontendNlb, err := r.buildAndDeployModel(ctx, ingGroup)
 	if err != nil {
 		return err
 	}
@@ -164,7 +164,14 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request)
 	if statusErr != nil {
 		return
 	}
-	statusErr = r.updateIngressGroupStatus(ctx, ingGroup, lbDNS)
+	var frontendNlbDNS string
+	if frontendNlb != nil {
+		frontendNlbDNS, statusErr = frontendNlb.DNSName().Resolve(ctx)
+		if statusErr != nil {
+			return
+		}
+	}
+	statusErr = r.updateIngressGroupStatus(ctx, ingGroup, lbDNS, frontendNlbDNS)
 	if statusErr != nil {
 		r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedUpdateStatus,
 			fmt.Sprintf("Failed update status due to %v", statusErr))
@@ -191,38 +198,40 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request)
 	return nil
 }

-func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingress.Group) (core.Stack, *elbv2model.LoadBalancer, error) {
+func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingress.Group) (core.Stack, *elbv2model.LoadBalancer, *elbv2model.LoadBalancer, error) {
 	var stack core.Stack
 	var lb *elbv2model.LoadBalancer
 	var secrets []types.NamespacedName
 	var backendSGRequired bool
 	var err error
+	var frontendNlbTargetGroupDesiredState *core.FrontendNlbTargetGroupDesiredState
+	var frontendNlb *elbv2model.LoadBalancer
 	buildModelFn := func() {
-		stack, lb, secrets, backendSGRequired, err = r.modelBuilder.Build(ctx, ingGroup, r.metricsCollector)
+		stack, lb, secrets, backendSGRequired, frontendNlbTargetGroupDesiredState, frontendNlb, err = r.modelBuilder.Build(ctx, ingGroup, r.metricsCollector)
 	}
 	r.metricsCollector.ObserveControllerReconcileLatency(controllerName, "build_model", buildModelFn)
 	if err != nil {
 		r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedBuildModel, fmt.Sprintf("Failed build model due to %v", err))
-		return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "build_model_error", err, r.metricsCollector)
+		return nil, nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "build_model_error", err, r.metricsCollector)
 	}
 	stackJSON, err := r.stackMarshaller.Marshal(stack)
 	if err != nil {
 		r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedBuildModel, fmt.Sprintf("Failed build model due to %v", err))
-		return nil, nil, err
+		return nil, nil, nil, err
 	}
 	r.logger.Info("successfully built model", "model", stackJSON)

 	deployModelFn := func() {
-		err = r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "ingress")
+		err = r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "ingress", frontendNlbTargetGroupDesiredState)
 	}
 	r.metricsCollector.ObserveControllerReconcileLatency(controllerName, "deploy_model", deployModelFn)
 	if err != nil {
 		var requeueNeededAfter *runtime.RequeueNeededAfter
 		if errors.As(err, &requeueNeededAfter) {
-			return nil, nil, err
+			return nil, nil, nil, err
 		}
 		r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedDeployModel, fmt.Sprintf("Failed deploy model due to %v", err))
-		return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "deploy_model_error", err, r.metricsCollector)
+		return nil, nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "deploy_model_error", err, r.metricsCollector)
 	}
 	r.logger.Info("successfully deployed model", "ingressGroup", ingGroup.ID)
 	r.secretsManager.MonitorSecrets(ingGroup.ID.String(), secrets)
@@ -232,9 +241,9 @@ func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingress.Group) (core.Stack, *elbv2model.LoadBalancer, *elbv2model.LoadBalancer, error) {
 		inactiveResources = append(inactiveResources, k8s.ToSliceOfNamespacedNames(ingGroup.Members)...)
 	}
 	if err := r.backendSGProvider.Release(ctx, networkingpkg.ResourceTypeIngress, inactiveResources); err != nil {
-		return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "release_auto_generated_backend_sg_error", err, r.metricsCollector)
+		return nil, nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "release_auto_generated_backend_sg_error", err, r.metricsCollector)
 	}
-	return stack, lb, nil
+	return stack, lb, frontendNlb, nil
 }

 func (r *groupReconciler) recordIngressGroupEvent(_ context.Context, ingGroup ingress.Group, eventType string, reason string, message string) {
@@ -243,29 +252,41 @@ func (r *groupReconciler) recordIngressGroupEvent(_ context.Context, ingGroup ingress.Group, eventType string, reason string, message string) {
 	}
 }

-func (r *groupReconciler) updateIngressGroupStatus(ctx context.Context, ingGroup ingress.Group, lbDNS string) error {
+func (r *groupReconciler) updateIngressGroupStatus(ctx context.Context, ingGroup ingress.Group, lbDNS string, frontendNLBDNS string) error {
 	for _, member := range ingGroup.Members {
-		if err := r.updateIngressStatus(ctx, lbDNS, member.Ing); err != nil {
+		if err := r.updateIngressStatus(ctx, lbDNS, frontendNLBDNS, member.Ing); err != nil {
 			return err
 		}
 	}
 	return nil
 }

-func (r *groupReconciler) updateIngressStatus(ctx context.Context, lbDNS string, ing *networking.Ingress) error {
+func (r *groupReconciler) updateIngressStatus(ctx context.Context, lbDNS string, frontendNlbDNS string, ing *networking.Ingress) error {
+	ingOld := ing.DeepCopy()
 	if len(ing.Status.LoadBalancer.Ingress) != 1 ||
 		ing.Status.LoadBalancer.Ingress[0].IP != "" ||
 		ing.Status.LoadBalancer.Ingress[0].Hostname != lbDNS {
-		ingOld := ing.DeepCopy()
 		ing.Status.LoadBalancer.Ingress = []networking.IngressLoadBalancerIngress{
 			{
 				Hostname: lbDNS,
 			},
 		}
+	}
+
+	// Ensure frontendNLBDNS is appended if it is not already added
+	if frontendNlbDNS != "" && !hasFrontendNlbHostName(ing.Status.LoadBalancer.Ingress, frontendNlbDNS) {
+		ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networking.IngressLoadBalancerIngress{
+			Hostname: frontendNlbDNS,
+		})
+	}
+
+	if !isIngressStatusEqual(ingOld.Status.LoadBalancer.Ingress, ing.Status.LoadBalancer.Ingress) {
 		if err := r.k8sClient.Status().Patch(ctx, ing, client.MergeFrom(ingOld)); err != nil {
 			return errors.Wrapf(err, "failed to update ingress status: %v", k8s.NamespacedName(ing))
 		}
+
 	}
+
 	return nil
 }
@@ -387,3 +408,37 @@ func isResourceKindAvailable(resList *metav1.APIResourceList, kind string) bool {
 		}
 	}
 	return false
 }
+
+func isIngressStatusEqual(a, b []networking.IngressLoadBalancerIngress) bool {
+	if len(a) != len(b) {
+		return false
+	}
+
+	setA := make(map[string]struct{}, len(a))
+	setB := make(map[string]struct{}, len(b))
+
+	for _, ingress := range a {
+		setA[ingress.Hostname] = struct{}{}
+	}
+
+	for _, ingress := range b {
+		setB[ingress.Hostname] = struct{}{}
+	}
+
+	for key := range setA {
+		if _, exists := setB[key]; !exists {
+			return false
+		}
+	}
+	return true
+}
+
+func hasFrontendNlbHostName(ingressList []networking.IngressLoadBalancerIngress, frontendNlbDNS string) bool {
+	for _, ingress := range ingressList {
+		if ingress.Hostname == frontendNlbDNS {
+			return true
+		}
+	}
+	return false
+}
2 changes: 1 addition & 1 deletion controllers/service/service_controller.go
@@ -152,7 +152,7 @@ func (r *serviceReconciler) buildModel(ctx context.Context, svc *corev1.Service)
 }

 func (r *serviceReconciler) deployModel(ctx context.Context, svc *corev1.Service, stack core.Stack) error {
-	if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "service"); err != nil {
+	if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "service", nil); err != nil {
 		var requeueNeededAfter *runtime.RequeueNeededAfter
 		if errors.As(err, &requeueNeededAfter) {
 			return err
128 changes: 128 additions & 0 deletions docs/guide/ingress/annotations.md
@@ -63,6 +63,19 @@ You can add annotations to kubernetes Ingress and Service objects to customize their behavior.
| [alb.ingress.kubernetes.io/listener-attributes.${Protocol}-${Port}](#listener-attributes) | stringMap |N/A| Ingress |Merge|
| [alb.ingress.kubernetes.io/minimum-load-balancer-capacity](#load-balancer-capacity-reservation) | stringMap |N/A| Ingress | Exclusive |
| [alb.ingress.kubernetes.io/ipam-ipv4-pool-id](#ipam-ipv4-pool-id) | string |N/A| Ingress | Exclusive |
| [alb.ingress.kubernetes.io/enable-frontend-nlb](#enable-frontend-nlb) | boolean |false | Ingress | Exclusive |
| [alb.ingress.kubernetes.io/frontend-nlb-scheme](#frontend-nlb-scheme) | internal \| internet-facing |internal| Ingress | Exclusive |
| [alb.ingress.kubernetes.io/frontend-nlb-subnets](#frontend-nlb-subnets) | stringList |N/A| Ingress | Exclusive |
| [alb.ingress.kubernetes.io/frontend-nlb-security-groups](#frontend-nlb-security-groups) | stringList |N/A| Ingress | Exclusive |
| [alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping](#frontend-nlb-listener-port-mapping) | stringMap |N/A| Ingress | Merge |
| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port](#frontend-nlb-healthcheck-port) | integer \| traffic-port |traffic-port| Ingress | N/A |
| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol](#frontend-nlb-healthcheck-protocol) | HTTP \| HTTPS |HTTP| Ingress | N/A |
| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path](#frontend-nlb-healthcheck-path) | string |/| Ingress | N/A |
| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds](#frontend-nlb-healthcheck-interval-seconds) | integer |15| Ingress | N/A |
| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds](#frontend-nlb-healthcheck-timeout-seconds) | integer |5| Ingress | N/A |
| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count](#frontend-nlb-healthcheck-healthy-threshold-count) | integer |3| Ingress | N/A |
| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count](#frontend-nlb-healthcheck-unhealthy-threshold-count) | integer |3| Ingress | N/A |
| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes](#frontend-nlb-healthcheck-success-codes) | string |200| Ingress | N/A |

## IngressGroup
IngressGroup feature enables you to group multiple Ingress resources together.
@@ -1024,3 +1037,118 @@ Load balancer capacity unit reservation can be configured via following annotations:
- disable shield protection
```
alb.ingress.kubernetes.io/shield-advanced-protection: 'false'
```


## Enable frontend NLB
When this option is set to `true`, the controller automatically provisions a Network Load Balancer and registers the Application Load Balancer as its target. Additional annotations are available to customize the NLB configuration, including options for the scheme, security groups, subnets, and health checks. The Ingress resource will carry two status entries, one for the NLB DNS name and one for the ALB DNS name. This combines the benefits of NLB and ALB in a single solution, leveraging NLB features such as static IP addresses and PrivateLink while retaining the rich routing capabilities of the ALB. A complete example manifest is sketched below.

!!!warning
- If you need to change the ALB [scheme](#scheme), make sure to disable this feature first. Changing the scheme creates a new ALB, which could interfere with the existing configuration.
- If you create an Ingress with this feature enabled from the start, provisioning the NLB and registering the ALB as its target can take 3-4 minutes to complete.

- <a name="enable-frontend-nlb">`alb.ingress.kubernetes.io/enable-frontend-nlb`</a> enables frontend Network Load Balancer functionality.

!!!example
- Enable frontend NLB
```
alb.ingress.kubernetes.io/enable-frontend-nlb: "true"
```
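
For reference, a complete Ingress that enables the feature might look like the following sketch (the `echo` name, namespace, and backend Service are hypothetical placeholders; the annotations are the ones documented on this page):
```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: echo                # hypothetical application name
  namespace: default
  annotations:
    alb.ingress.kubernetes.io/scheme: internal
    alb.ingress.kubernetes.io/enable-frontend-nlb: "true"
    alb.ingress.kubernetes.io/frontend-nlb-scheme: internet-facing
spec:
  ingressClassName: alb
  rules:
    - http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: echo  # hypothetical backend Service
                port:
                  number: 80
```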

- <a name="frontend-nlb-scheme">`alb.ingress.kubernetes.io/frontend-nlb-scheme`</a> specifies the scheme for the Network Load Balancer.

!!!example
- Set NLB scheme to internet-facing
```
alb.ingress.kubernetes.io/frontend-nlb-scheme: internet-facing
```

- <a name="frontend-nlb-subnets">`alb.ingress.kubernetes.io/frontend-nlb-subnets`</a> specifies the subnets for the Network Load Balancer.

!!!example
- Specify subnets for NLB
```
alb.ingress.kubernetes.io/frontend-nlb-subnets: subnet-xxxx1,subnet-xxxx2
```

- <a name="frontend-nlb-security-groups">`alb.ingress.kubernetes.io/frontend-nlb-security-groups`</a> specifies the security groups for the Network Load Balancer.

!!!example
- Specify security groups for NLB
```
alb.ingress.kubernetes.io/frontend-nlb-security-groups: sg-xxxx1,sg-xxxx2
```

- <a name="frontend-nlb-listener-port-mapping">`alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping`</a> specifies the port mapping configuration for the Network Load Balancer listeners.

!!!note "Default"
- The NLB listener port defaults to the matching ALB listener port, as determined by [`alb.ingress.kubernetes.io/listen-ports`](#listen-ports) when specified.

!!!example
- Forward TCP traffic from NLB:80 to ALB:443
```
alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping: 80=443
```
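
Assuming the comma-separated `key=value` stringMap syntax used by other annotations in this guide, several listeners could be mapped at once alongside explicit ALB listen ports:
```
alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS": 443}]'
# assumed multi-entry stringMap syntax: comma-separated key=value pairs
alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping: 80=80,443=443
```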

- <a name="frontend-nlb-healthcheck-port">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port`</a> specifies the port used for health checks.

!!!example
- Set health check port
```
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port: traffic-port
```

- <a name="frontend-nlb-healthcheck-protocol">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol`</a> specifies the protocol used for health checks.

!!!example
- Set health check protocol
```
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol: HTTP
```

- <a name="frontend-nlb-healthcheck-path">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path`</a> specifies the destination path for health checks.

!!!example
- Set health check path
```
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path: /health
```

- <a name="frontend-nlb-healthcheck-interval-seconds">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds`</a> specifies the interval between consecutive health checks.

!!!example
- Set health check interval
```
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds: '15'
```

- <a name="frontend-nlb-healthcheck-timeout-seconds">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds`</a> specifies the target group health check timeout.

!!!example
- Set health check timeout
```
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds: '5'
```

- <a name="frontend-nlb-healthcheck-healthy-threshold-count">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count`</a> specifies the consecutive health check successes required before a target is considered healthy.

!!!example
- Set healthy threshold count
```
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count: '3'
```

- <a name="frontend-nlb-healthcheck-unhealthy-threshold-count">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count`</a> specifies the consecutive health check failures before a target gets marked unhealthy.

!!!example
- Set unhealthy threshold count
```
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count: '3'
```

- <a name="frontend-nlb-healthcheck-success-codes">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes`</a> specifies the HTTP codes that indicate a successful health check.

!!!example
- Set success codes for health check
```
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes: '200'
```
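
Taken together, the health check annotations above can be combined to probe a dedicated endpoint; the `/healthz` path and the tightened interval and threshold values below are illustrative choices, not defaults:
```
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol: HTTP
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port: traffic-port
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path: /healthz   # illustrative endpoint
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds: '10'
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds: '5'
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count: '2'
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count: '2'
alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes: '200'
```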