Commit 9809175 (parent a2cd918)
Enable frontend NLB

17 files changed: +2433 -79

controllers/gateway/gateway_controller.go (+2 -1)

@@ -3,6 +3,7 @@ package gateway
 import (
     "context"
     "fmt"
+
     "github.com/go-logr/logr"
     "github.com/pkg/errors"
     corev1 "k8s.io/api/core/v1"
@@ -209,7 +210,7 @@ func (r *gatewayReconciler) reconcileUpdate(ctx context.Context, gw *gwv1.Gatewa
 }
 
 func (r *gatewayReconciler) deployModel(ctx context.Context, gw *gwv1.Gateway, stack core.Stack) error {
-    if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, r.controllerName); err != nil {
+    if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, r.controllerName, nil); err != nil {
         var requeueNeededAfter *runtime.RequeueNeededAfter
         if errors.As(err, &requeueNeededAfter) {
             return err

controllers/ingress/group_controller.go (+70 -15)

@@ -151,7 +151,7 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request)
         return errmetrics.NewErrorWithMetrics(controllerName, "add_group_finalizer_error", err, r.metricsCollector)
     }
 
-    _, lb, err := r.buildAndDeployModel(ctx, ingGroup)
+    _, lb, frontendNlb, err := r.buildAndDeployModel(ctx, ingGroup)
     if err != nil {
         return err
     }
@@ -164,7 +164,14 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request)
         if statusErr != nil {
             return
         }
-        statusErr = r.updateIngressGroupStatus(ctx, ingGroup, lbDNS)
+        var frontendNlbDNS string
+        if frontendNlb != nil {
+            frontendNlbDNS, statusErr = frontendNlb.DNSName().Resolve(ctx)
+            if statusErr != nil {
+                return
+            }
+        }
+        statusErr = r.updateIngressGroupStatus(ctx, ingGroup, lbDNS, frontendNlbDNS)
         if statusErr != nil {
             r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedUpdateStatus,
                 fmt.Sprintf("Failed update status due to %v", statusErr))
@@ -191,38 +198,40 @@ func (r *groupReconciler) reconcile(ctx context.Context, req reconcile.Request)
     return nil
 }
 
-func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingress.Group) (core.Stack, *elbv2model.LoadBalancer, error) {
+func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingress.Group) (core.Stack, *elbv2model.LoadBalancer, *elbv2model.LoadBalancer, error) {
     var stack core.Stack
     var lb *elbv2model.LoadBalancer
     var secrets []types.NamespacedName
     var backendSGRequired bool
     var err error
+    var frontendNlbTargetGroupDesiredState *core.FrontendNlbTargetGroupDesiredState
+    var frontendNlb *elbv2model.LoadBalancer
     buildModelFn := func() {
-        stack, lb, secrets, backendSGRequired, err = r.modelBuilder.Build(ctx, ingGroup, r.metricsCollector)
+        stack, lb, secrets, backendSGRequired, frontendNlbTargetGroupDesiredState, frontendNlb, err = r.modelBuilder.Build(ctx, ingGroup, r.metricsCollector)
     }
     r.metricsCollector.ObserveControllerReconcileLatency(controllerName, "build_model", buildModelFn)
     if err != nil {
         r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedBuildModel, fmt.Sprintf("Failed build model due to %v", err))
-        return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "build_model_error", err, r.metricsCollector)
+        return nil, nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "build_model_error", err, r.metricsCollector)
     }
     stackJSON, err := r.stackMarshaller.Marshal(stack)
     if err != nil {
         r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedBuildModel, fmt.Sprintf("Failed build model due to %v", err))
-        return nil, nil, err
+        return nil, nil, nil, err
     }
     r.logger.Info("successfully built model", "model", stackJSON)
 
     deployModelFn := func() {
-        err = r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "ingress")
+        err = r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "ingress", frontendNlbTargetGroupDesiredState)
     }
     r.metricsCollector.ObserveControllerReconcileLatency(controllerName, "deploy_model", deployModelFn)
     if err != nil {
         var requeueNeededAfter *runtime.RequeueNeededAfter
         if errors.As(err, &requeueNeededAfter) {
-            return nil, nil, err
+            return nil, nil, nil, err
         }
         r.recordIngressGroupEvent(ctx, ingGroup, corev1.EventTypeWarning, k8s.IngressEventReasonFailedDeployModel, fmt.Sprintf("Failed deploy model due to %v", err))
-        return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "deploy_model_error", err, r.metricsCollector)
+        return nil, nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "deploy_model_error", err, r.metricsCollector)
     }
     r.logger.Info("successfully deployed model", "ingressGroup", ingGroup.ID)
     r.secretsManager.MonitorSecrets(ingGroup.ID.String(), secrets)
@@ -232,9 +241,9 @@ func (r *groupReconciler) buildAndDeployModel(ctx context.Context, ingGroup ingr
         inactiveResources = append(inactiveResources, k8s.ToSliceOfNamespacedNames(ingGroup.Members)...)
     }
     if err := r.backendSGProvider.Release(ctx, networkingpkg.ResourceTypeIngress, inactiveResources); err != nil {
-        return nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "release_auto_generated_backend_sg_error", err, r.metricsCollector)
+        return nil, nil, nil, errmetrics.NewErrorWithMetrics(controllerName, "release_auto_generated_backend_sg_error", err, r.metricsCollector)
     }
-    return stack, lb, nil
+    return stack, lb, frontendNlb, nil
 }
 
 func (r *groupReconciler) recordIngressGroupEvent(_ context.Context, ingGroup ingress.Group, eventType string, reason string, message string) {
@@ -243,29 +252,41 @@ func (r *groupReconciler) recordIngressGroupEvent(_ context.Context, ingGroup in
     }
 }
 
-func (r *groupReconciler) updateIngressGroupStatus(ctx context.Context, ingGroup ingress.Group, lbDNS string) error {
+func (r *groupReconciler) updateIngressGroupStatus(ctx context.Context, ingGroup ingress.Group, lbDNS string, frontendNLBDNS string) error {
     for _, member := range ingGroup.Members {
-        if err := r.updateIngressStatus(ctx, lbDNS, member.Ing); err != nil {
+        if err := r.updateIngressStatus(ctx, lbDNS, frontendNLBDNS, member.Ing); err != nil {
             return err
         }
     }
     return nil
 }
 
-func (r *groupReconciler) updateIngressStatus(ctx context.Context, lbDNS string, ing *networking.Ingress) error {
+func (r *groupReconciler) updateIngressStatus(ctx context.Context, lbDNS string, frontendNlbDNS string, ing *networking.Ingress) error {
+    ingOld := ing.DeepCopy()
     if len(ing.Status.LoadBalancer.Ingress) != 1 ||
         ing.Status.LoadBalancer.Ingress[0].IP != "" ||
         ing.Status.LoadBalancer.Ingress[0].Hostname != lbDNS {
-        ingOld := ing.DeepCopy()
         ing.Status.LoadBalancer.Ingress = []networking.IngressLoadBalancerIngress{
             {
                 Hostname: lbDNS,
             },
         }
+    }
+
+    // Ensure frontendNLBDNS is appended if it is not already added
+    if frontendNlbDNS != "" && !hasFrontendNlbHostName(ing.Status.LoadBalancer.Ingress, frontendNlbDNS) {
+        ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networking.IngressLoadBalancerIngress{
+            Hostname: frontendNlbDNS,
+        })
+    }
+
+    if !isIngressStatusEqual(ingOld.Status.LoadBalancer.Ingress, ing.Status.LoadBalancer.Ingress) {
         if err := r.k8sClient.Status().Patch(ctx, ing, client.MergeFrom(ingOld)); err != nil {
             return errors.Wrapf(err, "failed to update ingress status: %v", k8s.NamespacedName(ing))
         }
+
     }
+
     return nil
 }
 
@@ -387,3 +408,37 @@ func isResourceKindAvailable(resList *metav1.APIResourceList, kind string) bool
     }
     return false
 }
+
+func isIngressStatusEqual(a, b []networking.IngressLoadBalancerIngress) bool {
+    if len(a) != len(b) {
+        return false
+    }
+
+    setA := make(map[string]struct{}, len(a))
+    setB := make(map[string]struct{}, len(b))
+
+    for _, ingress := range a {
+        setA[ingress.Hostname] = struct{}{}
+    }
+
+    for _, ingress := range b {
+        setB[ingress.Hostname] = struct{}{}
+    }
+
+    for key := range setA {
+        if _, exists := setB[key]; !exists {
+            return false
+        }
+    }
+    return true
+}
+
+func hasFrontendNlbHostName(ingressList []networking.IngressLoadBalancerIngress, frontendNlbDNS string) bool {
+    for _, ingress := range ingressList {
+        if ingress.Hostname == frontendNlbDNS {
+            return true
+        }
+    }
+    return false
+}
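
After this change, an Ingress fronted by an NLB reports both load balancer hostnames in its status. As an illustrative sketch (the DNS names below are placeholders, not output from this commit), the resulting status stanza would look roughly like:

```yaml
status:
  loadBalancer:
    ingress:
      # ALB DNS name, set first by updateIngressStatus (placeholder value)
      - hostname: k8s-default-myapp-0123456789.us-west-2.elb.amazonaws.com
      # frontend NLB DNS name, appended when not already present (placeholder value)
      - hostname: k8s-default-myapp-abcdef0123.elb.us-west-2.amazonaws.com
```

isIngressStatusEqual compares the old and new hostname sets, so the controller only patches the Ingress status when the set of hostnames actually changes.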

controllers/service/service_controller.go (+1 -1)

@@ -152,7 +152,7 @@ func (r *serviceReconciler) buildModel(ctx context.Context, svc *corev1.Service)
 }
 
 func (r *serviceReconciler) deployModel(ctx context.Context, svc *corev1.Service, stack core.Stack) error {
-    if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "service"); err != nil {
+    if err := r.stackDeployer.Deploy(ctx, stack, r.metricsCollector, "service", nil); err != nil {
         var requeueNeededAfter *runtime.RequeueNeededAfter
         if errors.As(err, &requeueNeededAfter) {
             return err

docs/guide/ingress/annotations.md (+121)

@@ -63,6 +63,19 @@ You can add annotations to kubernetes Ingress and Service objects to customize t
 | [alb.ingress.kubernetes.io/listener-attributes.${Protocol}-${Port}](#listener-attributes) | stringMap | N/A | Ingress | Merge |
 | [alb.ingress.kubernetes.io/minimum-load-balancer-capacity](#load-balancer-capacity-reservation) | stringMap | N/A | Ingress | Exclusive |
 | [alb.ingress.kubernetes.io/ipam-ipv4-pool-id](#ipam-ipv4-pool-id) | string | N/A | Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/enable-frontend-nlb](#enable-frontend-nlb) | boolean | false | Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/frontend-nlb-scheme](#frontend-nlb-scheme) | internal \| internet-facing | internal | Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/frontend-nlb-subnets](#frontend-nlb-subnets) | stringList | N/A | Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/frontend-nlb-security-groups](#frontend-nlb-security-groups) | stringList | N/A | Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping](#frontend-nlb-listener-port-mapping) | stringMap | N/A | Ingress | Merge |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port](#frontend-nlb-healthcheck-port) | integer \| traffic-port | traffic-port | Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol](#frontend-nlb-healthcheck-protocol) | HTTP \| HTTPS | HTTP | Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path](#frontend-nlb-healthcheck-path) | string | / | Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds](#frontend-nlb-healthcheck-interval-seconds) | integer | 15 | Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds](#frontend-nlb-healthcheck-timeout-seconds) | integer | 5 | Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count](#frontend-nlb-healthcheck-healthy-threshold-count) | integer | 3 | Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count](#frontend-nlb-healthcheck-unhealthy-threshold-count) | integer | 3 | Ingress | N/A |
+| [alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes](#frontend-nlb-healthcheck-success-codes) | string | 200 | Ingress | N/A |
 
 ## IngressGroup
 IngressGroup feature enables you to group multiple Ingress resources together.
@@ -1024,3 +1037,111 @@ Load balancer capacity unit reservation can be configured via following annotati
 - disable shield protection
 ```alb.ingress.kubernetes.io/shield-advanced-protection: 'false'
 ```
+
+
+## Enable frontend NLB
+When this option is set to true, the controller automatically provisions a Network Load Balancer and registers the Application Load Balancer as its target. Additional annotations are available to customize the NLB configuration, including options for scheme, security groups, subnets, and health checks. The Ingress exposes both the ALB DNS name and the NLB DNS name. This allows users to combine the benefits of NLB and ALB into a single solution, leveraging NLB features like static IP addresses and PrivateLink while retaining the rich routing capabilities of ALB.
+
+- <a name="enable-frontend-nlb">`alb.ingress.kubernetes.io/enable-frontend-nlb`</a> enables frontend Network Load Balancer functionality.
+
+    !!!example
+        - Enable frontend NLB
+        ```
+        alb.ingress.kubernetes.io/enable-frontend-nlb: "true"
+        ```
+
+- <a name="frontend-nlb-scheme">`alb.ingress.kubernetes.io/frontend-nlb-scheme`</a> specifies the scheme for the Network Load Balancer.
+
+    !!!example
+        - Set NLB scheme to internal
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-scheme: internal
+        ```
+
+- <a name="frontend-nlb-subnets">`alb.ingress.kubernetes.io/frontend-nlb-subnets`</a> specifies the subnets for the Network Load Balancer.
+
+    !!!example
+        - Specify subnets for NLB
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-subnets: subnet-xxxx1,subnet-xxxx2
+        ```
+
+- <a name="frontend-nlb-security-groups">`alb.ingress.kubernetes.io/frontend-nlb-security-groups`</a> specifies the security groups for the Network Load Balancer.
+
+    !!!example
+        - Specify security groups for NLB
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-security-groups: sg-xxxx1,sg-xxxx2
+        ```
+
+- <a name="frontend-nlb-listener-port-mapping">`alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping`</a> specifies the port mapping configuration for the Network Load Balancer listeners, mapping each NLB listener port to the ALB listener port it forwards to.
+
+    !!!example
+        - Forward NLB listener port 80 to ALB listener port 443
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping: 80=443
+        ```
+
+- <a name="frontend-nlb-healthcheck-port">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port`</a> specifies the port used for health checks.
+
+    !!!example
+        - Set health check port
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-port: traffic-port
+        ```
+
+- <a name="frontend-nlb-healthcheck-protocol">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol`</a> specifies the protocol used for health checks.
+
+    !!!example
+        - Set health check protocol
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-protocol: HTTP
+        ```
+
+- <a name="frontend-nlb-healthcheck-path">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path`</a> specifies the destination path for health checks.
+
+    !!!example
+        - Set health check path
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path: /health
+        ```
+
+- <a name="frontend-nlb-healthcheck-interval-seconds">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds`</a> specifies the interval, in seconds, between consecutive health checks.
+
+    !!!example
+        - Set health check interval
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-interval-seconds: '15'
+        ```
+
+- <a name="frontend-nlb-healthcheck-timeout-seconds">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds`</a> specifies the target group health check timeout, in seconds.
+
+    !!!example
+        - Set health check timeout
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-timeout-seconds: '5'
+        ```
+
+- <a name="frontend-nlb-healthcheck-healthy-threshold-count">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count`</a> specifies the number of consecutive health check successes required before a target is considered healthy.
+
+    !!!example
+        - Set healthy threshold count
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-healthy-threshold-count: '3'
+        ```
+
+- <a name="frontend-nlb-healthcheck-unhealthy-threshold-count">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count`</a> specifies the number of consecutive health check failures required before a target is considered unhealthy.
+
+    !!!example
+        - Set unhealthy threshold count
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-unhealthy-threshold-count: '3'
+        ```
+
+- <a name="frontend-nlb-healthcheck-success-codes">`alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes`</a> specifies the HTTP codes that indicate a successful health check.
+
+    !!!example
+        - Set success codes for health check
+        ```
+        alb.ingress.kubernetes.io/frontend-nlb-healthcheck-success-codes: '200'
+        ```
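
Taken together, a minimal Ingress manifest that enables a frontend NLB in front of its ALB might look like the following sketch; the workload name, subnet IDs, and health check path are illustrative placeholders, not values from this commit:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: echoserver                 # placeholder workload name
  annotations:
    # Standard ALB settings
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/target-type: ip
    # Provision an NLB and register the ALB as its target
    alb.ingress.kubernetes.io/enable-frontend-nlb: "true"
    alb.ingress.kubernetes.io/frontend-nlb-scheme: internet-facing
    alb.ingress.kubernetes.io/frontend-nlb-subnets: subnet-xxxx1,subnet-xxxx2  # placeholder subnet IDs
    # NLB listener port 80 forwards to the ALB listener on port 80
    alb.ingress.kubernetes.io/frontend-nlb-listener-port-mapping: 80=80
    alb.ingress.kubernetes.io/frontend-nlb-healthcheck-path: /health           # placeholder path
spec:
  ingressClassName: alb
  rules:
    - http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: echoserver   # placeholder Service
                port:
                  number: 80
```

With this in place, the Ingress reports two addresses, the ALB DNS name and the frontend NLB DNS name, matching the status handling added in controllers/ingress/group_controller.go.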
