diff --git a/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md b/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md index a74a3b44b3..01ab0cd744 100644 --- a/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md +++ b/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md @@ -210,6 +210,22 @@ Request Body: Defines the health monitor retry count for the loadbalancer pool members to be marked down. +- `loadbalancer.openstack.org/metrics-enable` + + If 'true', enables the Prometheus listener on the loadbalancer. (default: 'false') + + The Kubernetes service must be the [owner of the LoadBalancer](#sharing-load-balancer-with-multiple-services) + + Not supported when `lb-provider=ovn` is configured in openstack-cloud-controller-manager. + +- `loadbalancer.openstack.org/metrics-port` + + Defines the Prometheus listener's port. If `metrics-enable` is 'true', the annotation is automatically added to the service. Default: `9100` + +- `loadbalancer.openstack.org/metrics-allow-cidrs` + + Defines the Prometheus listener's allowed CIDRs. __Warning__: [security recommendations](#metric-listener-allowed-cird-security-recommendation). Default: none + - `loadbalancer.openstack.org/flavor-id` The id of the flavor that is used for creating the loadbalancer. @@ -248,6 +264,10 @@ Request Body: This annotation is automatically added and it contains the floating ip address of the load balancer service. When using `loadbalancer.openstack.org/hostname` annotation it is the only place to see the real address of the load balancer. +- `loadbalancer.openstack.org/load-balancer-vip-address` + + This annotation is automatically added and it contains Octavia's Virtual IP (VIP) address. + - `loadbalancer.openstack.org/node-selector` A set of key=value annotations used to filter nodes for targeting by the load balancer. 
When defined, only nodes that match all the specified key=value annotations will be targeted. If an annotation includes only a key without a value, the filter will check only for the existence of the key on the node. If the value is not set, the `node-selector` value defined in the OCCM configuration is applied. @@ -644,3 +664,64 @@ is not yet supported by OCCM. Internally, OCCM would automatically look for IPv4 or IPv6 subnet to allocate the load balancer address from based on the service's address family preference. If the subnet with preferred address family is not available, load balancer can not be created. + +### Metric endpoint configuration + +Since Octavia v2.25, Octavia can expose an HTTP Prometheus metrics endpoint. Using the annotation `loadbalancer.openstack.org/metrics-enable`, you can configure this endpoint on the LoadBalancer: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: service-with-metric + namespace: default + annotations: + loadbalancer.openstack.org/metrics-enable: "true" # Enable the listener endpoint on the Octavia LoadBalancer (default false) + loadbalancer.openstack.org/metrics-port: "9100" # Listener's port (default 9100) + loadbalancer.openstack.org/metrics-allow-cidrs: "10.0.0.0/8, fe80::/10" # Listener's allowed cidrs (default none) +spec: + type: LoadBalancer +``` + +Then, you can configure a Prometheus scraper to collect metrics from the LoadBalancer. + +e.g. 
Prometheus Operator configuration: + +```yaml +apiVersion: monitoring.coreos.com/v1alpha1 +kind: ScrapeConfig +metadata: + name: octavia-sd-config + labels: + release: prometheus # adapt it to your Prometheus deployment configuration +spec: + kubernetesSDConfigs: + - role: Service + relabelings: + - sourceLabels: [__meta_kubernetes_namespace] + targetLabel: namespace + action: replace + - sourceLabels: [__meta_kubernetes_service_name] + targetLabel: job + action: replace + - sourceLabels: + - __meta_kubernetes_service_annotation_loadbalancer_openstack_org_load_balancer_vip_address + - __meta_kubernetes_service_annotation_loadbalancer_openstack_org_metrics_port + separator: ":" + targetLabel: __address__ + action: replace + - sourceLabels: + - __meta_kubernetes_service_annotation_loadbalancer_openstack_org_metrics_enable + regex: "true" + action: keep +``` + +> This configuration uses the `loadbalancer.openstack.org/load-balancer-vip-address` annotation, which contains Octavia's VIP, to reach the metric endpoint. Adapt it to your Octavia deployment. + +For more information: https://docs.openstack.org/octavia/latest/user/guides/monitoring.html#monitoring-with-prometheus + +Grafana dashboard for Octavia Amphora: https://grafana.com/grafana/dashboards/15828-openstack-octavia-amphora-load-balancer/ + +#### Metric listener allowed CIRD security recommendation + +If the Octavia LoadBalancer is exposed with a public IP, the Prometheus listener is also exposed (at least for Amphora). Even if no critical data are exposed by this endpoint, __it is strongly recommended to apply allowed CIDRs on the listener__ via the annotation `loadbalancer.openstack.org/metrics-allow-cidrs`. 
diff --git a/go.mod b/go.mod index 378947da45..393d55b86d 100644 --- a/go.mod +++ b/go.mod @@ -45,11 +45,14 @@ require ( // the below fixes the "go list -m all" execution replace ( k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.34.1 + k8s.io/cri-client => k8s.io/cri-client v0.34.1 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.34.1 k8s.io/endpointslice => k8s.io/endpointslice v0.34.1 k8s.io/externaljwt => k8s.io/externaljwt v0.34.1 k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.34.1 k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.34.1 k8s.io/kube-proxy => k8s.io/kube-proxy v0.34.1 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.34.1 k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.34.1 ) diff --git a/pkg/openstack/events.go b/pkg/openstack/events.go index 8996cafd2f..95456e1519 100644 --- a/pkg/openstack/events.go +++ b/pkg/openstack/events.go @@ -24,4 +24,5 @@ const ( eventLBFloatingIPSkipped = "LoadBalancerFloatingIPSkipped" eventLBRename = "LoadBalancerRename" eventLBLbMethodUnknown = "LoadBalancerLbMethodUnknown" + eventLBMetricListenerIgnored = "LoadBalancerMetricListenerIgnored" ) diff --git a/pkg/openstack/loadbalancer.go b/pkg/openstack/loadbalancer.go index c67ba1f1b4..28a759adb2 100644 --- a/pkg/openstack/loadbalancer.go +++ b/pkg/openstack/loadbalancer.go @@ -73,6 +73,9 @@ const ( ServiceAnnotationLoadBalancerSubnetID = "loadbalancer.openstack.org/subnet-id" ServiceAnnotationLoadBalancerNetworkID = "loadbalancer.openstack.org/network-id" ServiceAnnotationLoadBalancerMemberSubnetID = "loadbalancer.openstack.org/member-subnet-id" + ServiceAnnotationLoadBalancerMetricsEnabled = "loadbalancer.openstack.org/metrics-enable" + ServiceAnnotationLoadBalancerMetricsPort = "loadbalancer.openstack.org/metrics-port" + ServiceAnnotationLoadBalancerMetricsAllowCidrs = "loadbalancer.openstack.org/metrics-allow-cidrs" ServiceAnnotationLoadBalancerTimeoutClientData = 
"loadbalancer.openstack.org/timeout-client-data" ServiceAnnotationLoadBalancerTimeoutMemberConnect = "loadbalancer.openstack.org/timeout-member-connect" ServiceAnnotationLoadBalancerTimeoutMemberData = "loadbalancer.openstack.org/timeout-member-data" @@ -89,6 +92,7 @@ const ( ServiceAnnotationLoadBalancerHealthMonitorMaxRetriesDown = "loadbalancer.openstack.org/health-monitor-max-retries-down" ServiceAnnotationLoadBalancerLoadbalancerHostname = "loadbalancer.openstack.org/hostname" ServiceAnnotationLoadBalancerAddress = "loadbalancer.openstack.org/load-balancer-address" + ServiceAnnotationLoadBalancerVIPAddress = "loadbalancer.openstack.org/load-balancer-vip-address" // revive:disable:var-naming ServiceAnnotationTlsContainerRef = "loadbalancer.openstack.org/default-tls-container-ref" // revive:enable:var-naming @@ -97,14 +101,15 @@ const ( ServiceAnnotationLoadBalancerID = "loadbalancer.openstack.org/load-balancer-id" // Octavia resources name formats - servicePrefix = "kube_service_" - lbFormat = "%s%s_%s_%s" - listenerPrefix = "listener_" - listenerFormat = listenerPrefix + "%d_%s" - poolPrefix = "pool_" - poolFormat = poolPrefix + "%d_%s" - monitorPrefix = "monitor_" - monitorFormat = monitorPrefix + "%d_%s" + servicePrefix = "kube_service_" + lbFormat = "%s%s_%s_%s" + listenerPrefix = "listener_" + listenerFormat = listenerPrefix + "%d_%s" + listenerFormatMetric = listenerPrefix + "metric_%s" + poolPrefix = "pool_" + poolFormat = poolPrefix + "%d_%s" + monitorPrefix = "monitor_" + monitorFormat = monitorPrefix + "%d_%s" ) // LbaasV2 is a LoadBalancer implementation based on Octavia @@ -145,6 +150,9 @@ type serviceConfig struct { healthMonitorTimeout int healthMonitorMaxRetries int healthMonitorMaxRetriesDown int + metricAllowedCIDRs []string + metricEnabled bool + metricPort int preferredIPFamily corev1.IPFamily // preferred (the first) IP family indicated in service's `spec.ipFamilies` } @@ -423,13 +431,13 @@ func getKeyValueFromServiceAnnotation(service 
*corev1.Service, annotationKey str func getStringFromServiceAnnotation(service *corev1.Service, annotationKey string, defaultSetting string) string { klog.V(4).Infof("getStringFromServiceAnnotation(%s/%s, %v, %v)", service.Namespace, service.Name, annotationKey, defaultSetting) if annotationValue, ok := service.Annotations[annotationKey]; ok { - //if there is an annotation for this setting, set the "setting" var to it + // if there is an annotation for this setting, set the "setting" var to it // annotationValue can be empty, it is working as designed // it makes possible for instance provisioning loadbalancer without floatingip klog.V(4).Infof("Found a Service Annotation: %v = %v", annotationKey, annotationValue) return annotationValue } - //if there is no annotation, set "settings" var to the value from cloud config + // if there is no annotation, set "settings" var to the value from cloud config if defaultSetting != "" { klog.V(4).Infof("Could not find a Service Annotation; falling back on cloud-config setting: %v = %v", annotationKey, defaultSetting) } @@ -453,6 +461,22 @@ func getIntFromServiceAnnotation(service *corev1.Service, annotationKey string, return defaultSetting } +// getStringArrayFromServiceAnnotationSeparatedByComma searches a given v1.Service for a specific annotationKey +// and either returns the annotation's string array value (using comma as separator), or the specified defaultSetting. +// Each value of the array is TrimSpaced. After the trim, if the string is empty, remove it. 
+func getStringArrayFromServiceAnnotationSeparatedByComma(service *corev1.Service, annotationKey string, defaultSetting []string) []string { + klog.V(4).Infof("getStringArrayFromServiceAnnotationSeparatedByComma(%s/%s, %v, %q)", service.Namespace, service.Name, annotationKey, defaultSetting) + if annotationValue, ok := service.Annotations[annotationKey]; ok { + returnValue := cpoutil.SplitTrim(annotationValue, ',') + + klog.V(4).Infof("Found a Service Annotation: %v = %q", annotationKey, returnValue) + return returnValue + } + + klog.V(4).Infof("Could not find a Service Annotation; falling back to default setting: %v = %q", annotationKey, defaultSetting) + return defaultSetting +} + // getBoolFromServiceAnnotation searches a given v1.Service for a specific annotationKey and either returns the annotation's boolean value or a specified defaultSetting // If the annotation is not found or is not a valid boolean ("true" or "false"), it falls back to the defaultSetting and logs a message accordingly. 
func getBoolFromServiceAnnotation(service *corev1.Service, annotationKey string, defaultSetting bool) bool { @@ -1076,16 +1100,39 @@ func (lbaas *LbaasV2) buildCreateMemberOpts(ctx context.Context, port corev1.Ser } // Make sure the listener is created for Service -func (lbaas *LbaasV2) ensureOctaviaListener(ctx context.Context, lbID string, name string, curListenerMapping map[listenerKey]*listeners.Listener, port corev1.ServicePort, svcConf *serviceConfig) (*listeners.Listener, error) { - listener, isPresent := curListenerMapping[listenerKey{ - Protocol: getListenerProtocol(port.Protocol, svcConf), - Port: int(port.Port), - }] - if !isPresent { - listenerCreateOpt := lbaas.buildListenerCreateOpt(ctx, port, svcConf, name) - listenerCreateOpt.LoadbalancerID = lbID +func (lbaas *LbaasV2) ensureOctaviaListener(ctx context.Context, lbID string, name string, curListenerMapping map[listenerKey]*listeners.Listener, port corev1.ServicePort, svcConf *serviceConfig, isMetricListener bool) (*listeners.Listener, error) { + var listener *listeners.Listener + var isListenerPresent bool + + if isMetricListener { + listener, isListenerPresent = curListenerMapping[listenerKey{ + Protocol: listeners.ProtocolPrometheus, + Port: svcConf.metricPort, + }] + } else { + listener, isListenerPresent = curListenerMapping[listenerKey{ + Protocol: getListenerProtocol(port.Protocol, svcConf), + Port: int(port.Port), + }] + } - klog.V(2).Infof("Creating listener for port %d using protocol %s", int(port.Port), listenerCreateOpt.Protocol) + if !isListenerPresent { + var listenerCreateOpt listeners.CreateOpts + if isMetricListener { + listenerCreateOpt = listeners.CreateOpts{ + Name: name, + Protocol: listeners.ProtocolPrometheus, + ProtocolPort: svcConf.metricPort, + AllowedCIDRs: svcConf.metricAllowedCIDRs, + LoadbalancerID: lbID, + Tags: []string{svcConf.lbName}, + } + } else { + listenerCreateOpt = lbaas.buildListenerCreateOpt(ctx, port, svcConf, name) + listenerCreateOpt.LoadbalancerID = lbID 
+ } + + klog.V(2).Infof("Creating listener for port %d using protocol %s", listenerCreateOpt.ProtocolPort, listenerCreateOpt.Protocol) var err error listener, err = openstackutil.CreateListener(ctx, lbaas.lb, lbID, listenerCreateOpt) @@ -1108,53 +1155,59 @@ func (lbaas *LbaasV2) ensureOctaviaListener(ctx context.Context, lbID string, na } } - if svcConf.connLimit != listener.ConnLimit { - updateOpts.ConnLimit = &svcConf.connLimit - listenerChanged = true - } - - listenerKeepClientIP := listener.InsertHeaders[annotationXForwardedFor] == "true" - if svcConf.keepClientIP != listenerKeepClientIP { - updateOpts.InsertHeaders = &listener.InsertHeaders - if svcConf.keepClientIP { - if *updateOpts.InsertHeaders == nil { - *updateOpts.InsertHeaders = make(map[string]string) - } - (*updateOpts.InsertHeaders)[annotationXForwardedFor] = "true" - } else { - delete(*updateOpts.InsertHeaders, annotationXForwardedFor) - } - listenerChanged = true - } - if svcConf.tlsContainerRef != listener.DefaultTlsContainerRef { - updateOpts.DefaultTlsContainerRef = &svcConf.tlsContainerRef - listenerChanged = true - } - if openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureTimeout, lbaas.opts.LBProvider) { - if svcConf.timeoutClientData != listener.TimeoutClientData { - updateOpts.TimeoutClientData = &svcConf.timeoutClientData + if isMetricListener { + if !cpoutil.StringListEqual(svcConf.metricAllowedCIDRs, listener.AllowedCIDRs) { + updateOpts.AllowedCIDRs = &svcConf.metricAllowedCIDRs listenerChanged = true } - if svcConf.timeoutMemberConnect != listener.TimeoutMemberConnect { - updateOpts.TimeoutMemberConnect = &svcConf.timeoutMemberConnect + } else { + if svcConf.connLimit != listener.ConnLimit { + updateOpts.ConnLimit = &svcConf.connLimit listenerChanged = true } - if svcConf.timeoutMemberData != listener.TimeoutMemberData { - updateOpts.TimeoutMemberData = &svcConf.timeoutMemberData + + listenerKeepClientIP := 
listener.InsertHeaders[annotationXForwardedFor] == "true" + if svcConf.keepClientIP != listenerKeepClientIP { + updateOpts.InsertHeaders = &listener.InsertHeaders + if svcConf.keepClientIP { + if *updateOpts.InsertHeaders == nil { + *updateOpts.InsertHeaders = make(map[string]string) + } + (*updateOpts.InsertHeaders)[annotationXForwardedFor] = "true" + } else { + delete(*updateOpts.InsertHeaders, annotationXForwardedFor) + } listenerChanged = true } - if svcConf.timeoutTCPInspect != listener.TimeoutTCPInspect { - updateOpts.TimeoutTCPInspect = &svcConf.timeoutTCPInspect + if svcConf.tlsContainerRef != listener.DefaultTlsContainerRef { + updateOpts.DefaultTlsContainerRef = &svcConf.tlsContainerRef listenerChanged = true } - } - if openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureVIPACL, lbaas.opts.LBProvider) { - if !cpoutil.StringListEqual(svcConf.allowedCIDR, listener.AllowedCIDRs) { - updateOpts.AllowedCIDRs = &svcConf.allowedCIDR - listenerChanged = true + if openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureTimeout, lbaas.opts.LBProvider) { + if svcConf.timeoutClientData != listener.TimeoutClientData { + updateOpts.TimeoutClientData = &svcConf.timeoutClientData + listenerChanged = true + } + if svcConf.timeoutMemberConnect != listener.TimeoutMemberConnect { + updateOpts.TimeoutMemberConnect = &svcConf.timeoutMemberConnect + listenerChanged = true + } + if svcConf.timeoutMemberData != listener.TimeoutMemberData { + updateOpts.TimeoutMemberData = &svcConf.timeoutMemberData + listenerChanged = true + } + if svcConf.timeoutTCPInspect != listener.TimeoutTCPInspect { + updateOpts.TimeoutTCPInspect = &svcConf.timeoutTCPInspect + listenerChanged = true + } + } + if openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureVIPACL, lbaas.opts.LBProvider) { + if !cpoutil.StringListEqual(svcConf.allowedCIDR, listener.AllowedCIDRs) { + updateOpts.AllowedCIDRs = &svcConf.allowedCIDR + 
listenerChanged = true + } } } - if listenerChanged { klog.InfoS("Updating listener", "listenerID", listener.ID, "lbID", lbID, "updateOpts", updateOpts) if err := openstackutil.UpdateListener(ctx, lbaas.lb, lbID, listener.ID, updateOpts); err != nil { @@ -1163,7 +1216,6 @@ func (lbaas *LbaasV2) ensureOctaviaListener(ctx context.Context, lbID string, na klog.InfoS("Updated listener", "listenerID", listener.ID, "lbID", lbID) } } - return listener, nil } @@ -1452,6 +1504,12 @@ func (lbaas *LbaasV2) checkService(ctx context.Context, service *corev1.Service, svcConf.lbMemberSubnetID = memberSubnetID } + svcConf.metricEnabled = getBoolFromServiceAnnotation(service, ServiceAnnotationLoadBalancerMetricsEnabled, false) + if svcConf.metricEnabled { + svcConf.metricPort = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerMetricsPort, 9100) + svcConf.metricAllowedCIDRs = getStringArrayFromServiceAnnotationSeparatedByComma(service, ServiceAnnotationLoadBalancerMetricsAllowCidrs, []string{}) + } + if !svcConf.internal { var lbClass *LBClass var floatingNetworkID string @@ -1609,7 +1667,7 @@ func (lbaas *LbaasV2) makeSvcConf(ctx context.Context, serviceName string, servi } // checkListenerPorts checks if there is conflict for ports. 
-func (lbaas *LbaasV2) checkListenerPorts(service *corev1.Service, curListenerMapping map[listenerKey]*listeners.Listener, isLBOwner bool, lbName string) error { +func (lbaas *LbaasV2) checkListenerPorts(service *corev1.Service, curListenerMapping map[listenerKey]*listeners.Listener, isLBOwner bool, lbName string, extraPorts []corev1.ServicePort) error { for _, svcPort := range service.Spec.Ports { key := listenerKey{Protocol: listeners.Protocol(svcPort.Protocol), Port: int(svcPort.Port)} @@ -1622,6 +1680,21 @@ func (lbaas *LbaasV2) checkListenerPorts(service *corev1.Service, curListenerMap return fmt.Errorf("the listener port %d already exists", svcPort.Port) } } + // Check if extra ports do not intersect with the service's spec.Ports. + for _, extraPort := range extraPorts { + if extraPort.Protocol == svcPort.Protocol && extraPort.Port == svcPort.Port { + return fmt.Errorf("an extraPort (%s) conflicts with a Service Port %d/%s", extraPort.Name, extraPort.Port, string(extraPort.Protocol)) + } + } + } + + // Check if extra ports are not conflicting themselves. 
+ for _, extraPort := range extraPorts { + for _, cmpExtraPort := range extraPorts { + if extraPort.Name != cmpExtraPort.Name && extraPort.Protocol == cmpExtraPort.Protocol && extraPort.Port == cmpExtraPort.Port { + return fmt.Errorf("some extraPorts (%s/%s) are in conflicts: %d/%s", extraPort.Name, cmpExtraPort.Name, extraPort.Port, string(extraPort.Protocol)) + } + } } return nil @@ -1778,13 +1851,24 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName } klog.V(4).InfoS("Existing listeners", "portProtocolMapping", curListenerMapping) + // setup extra ports list to check if there are no conflicts with Service's ones + var extraPortsList []corev1.ServicePort + if svcConf.metricEnabled && openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeaturePrometheusListener, lbaas.opts.LBProvider) { + extraPortsList = append(extraPortsList, + corev1.ServicePort{ + Name: "octavia-metric-endpoint", + Protocol: corev1.ProtocolTCP, + Port: int32(svcConf.metricPort), + }) + } + // Check port conflicts - if err := lbaas.checkListenerPorts(service, curListenerMapping, isLBOwner, lbName); err != nil { + if err := lbaas.checkListenerPorts(service, curListenerMapping, isLBOwner, lbName, extraPortsList); err != nil { return nil, err } for portIndex, port := range service.Spec.Ports { - listener, err := lbaas.ensureOctaviaListener(ctx, loadbalancer.ID, cpoutil.Sprintf255(listenerFormat, portIndex, lbName), curListenerMapping, port, svcConf) + listener, err := lbaas.ensureOctaviaListener(ctx, loadbalancer.ID, cpoutil.Sprintf255(listenerFormat, portIndex, lbName), curListenerMapping, port, svcConf, false) if err != nil { return nil, err } @@ -1804,6 +1888,22 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName curListeners = popListener(curListeners, listener.ID) } + // Check if we need to expose the metric endpoint + if svcConf.metricEnabled && openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, 
openstackutil.OctaviaFeaturePrometheusListener, lbaas.opts.LBProvider) { + // Only a LB owner can add the prometheus listener (to avoid conflict with a shared loadbalancer) + if isLBOwner { + lbaas.updateServiceAnnotation(service, ServiceAnnotationLoadBalancerMetricsPort, strconv.Itoa(svcConf.metricPort)) + listener, err := lbaas.ensureOctaviaListener(ctx, loadbalancer.ID, cpoutil.Sprintf255(listenerFormatMetric, lbName), curListenerMapping, corev1.ServicePort{}, svcConf, true) + if err != nil { + return nil, err + } + curListeners = popListener(curListeners, listener.ID) + } else { + msg := "Metric Listener cannot be deployed on Service %s, only owner Service can do that" + lbaas.eventRecorder.Eventf(service, corev1.EventTypeWarning, eventLBMetricListenerIgnored, msg, serviceName) + klog.Warningf(msg, serviceName) + } + } // Deal with the remaining listeners, delete the listener if it was created by this Service previously. if err := lbaas.deleteOctaviaListeners(ctx, loadbalancer.ID, curListeners, isLBOwner, lbName); err != nil { return nil, err @@ -1823,8 +1923,9 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName } } - // save address into the annotation + // save addresses into the annotations lbaas.updateServiceAnnotation(service, ServiceAnnotationLoadBalancerAddress, addr) + lbaas.updateServiceAnnotation(service, ServiceAnnotationLoadBalancerVIPAddress, loadbalancer.VipAddress) // add LB name to load balancer tags. 
if svcConf.supportLBTags { diff --git a/pkg/openstack/loadbalancer_test.go b/pkg/openstack/loadbalancer_test.go index 27304a45ad..da7cb4a900 100644 --- a/pkg/openstack/loadbalancer_test.go +++ b/pkg/openstack/loadbalancer_test.go @@ -2,6 +2,7 @@ package openstack import ( "context" + "errors" "fmt" "k8s.io/utils/ptr" "reflect" @@ -549,11 +550,12 @@ func TestLbaasV2_checkListenerPorts(t *testing.T) { curListenerMapping map[listenerKey]*listeners.Listener isLBOwner bool lbName string + extraPorts []corev1.ServicePort } tests := []struct { name string args args - wantErr bool + wantErr error }{ { name: "error is not thrown if loadbalancer matches & if port is already in use by a lb", @@ -578,10 +580,11 @@ func TestLbaasV2_checkListenerPorts(t *testing.T) { Tags: []string{"test-lb"}, }, }, - isLBOwner: false, - lbName: "test-lb", + isLBOwner: false, + lbName: "test-lb", + extraPorts: nil, }, - wantErr: false, + wantErr: nil, }, { name: "error is thrown if loadbalancer doesn't matches & if port is already in use by a service", @@ -606,10 +609,11 @@ func TestLbaasV2_checkListenerPorts(t *testing.T) { Tags: []string{"test-lb", "test-lb1"}, }, }, - isLBOwner: false, - lbName: "test-lb2", + isLBOwner: false, + lbName: "test-lb2", + extraPorts: nil, }, - wantErr: true, + wantErr: errors.New("already exists"), }, { name: "error is not thrown if lbOwner is present & no tags on service", @@ -633,10 +637,11 @@ func TestLbaasV2_checkListenerPorts(t *testing.T) { ID: "listenerid", }, }, - isLBOwner: true, - lbName: "test-lb", + isLBOwner: true, + lbName: "test-lb", + extraPorts: nil, }, - wantErr: false, + wantErr: nil, }, { name: "error is not thrown if lbOwner is true & there are tags on service", @@ -661,10 +666,11 @@ func TestLbaasV2_checkListenerPorts(t *testing.T) { Tags: []string{"test-lb"}, }, }, - isLBOwner: true, - lbName: "test-lb", + isLBOwner: true, + lbName: "test-lb", + extraPorts: nil, }, - wantErr: false, + wantErr: nil, }, { name: "error is not thrown if listener 
key doesn't match port & protocol", @@ -689,10 +695,126 @@ func TestLbaasV2_checkListenerPorts(t *testing.T) { Tags: []string{"test-lb"}, }, }, - isLBOwner: false, + isLBOwner: false, + lbName: "test-lb", + extraPorts: nil, + }, + wantErr: nil, + }, + { + name: "validate a legit extraPorts list", + args: args{ + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "service", + Protocol: corev1.ProtocolTCP, + Port: 443, + }, + }, + }, + }, + curListenerMapping: map[listenerKey]*listeners.Listener{ + { + Protocol: "tcp", + Port: 443, + }: { + ID: "listenerid", + Tags: []string{"test-lb"}, + }, + }, + isLBOwner: true, + lbName: "test-lb", + extraPorts: []corev1.ServicePort{ + { + Name: "extra-port-tcp", + Protocol: corev1.ProtocolTCP, + Port: 9100, + }, + { + Name: "extra-port-udp", + Protocol: corev1.ProtocolUDP, + Port: 9100, + }, + }, + }, + wantErr: nil, + }, + { + name: "error is thrown if extra ports conflict with Service's ports", + args: args{ + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "service", + Protocol: corev1.ProtocolTCP, + Port: 9100, + }, + }, + }, + }, + curListenerMapping: map[listenerKey]*listeners.Listener{ + { + Protocol: "tcp", + Port: 9100, + }: { + ID: "listenerid", + Tags: []string{"test-lb"}, + }, + }, + isLBOwner: true, + lbName: "test-lb", + extraPorts: []corev1.ServicePort{ + { + Name: "extra-port", + Protocol: corev1.ProtocolTCP, + Port: 9100, + }, + }, + }, + wantErr: errors.New("an extraPort (extra-port) conflicts with a Service Port 9100/TCP"), + }, + { + name: "error is thrown if extra ports conflict with another extra port", + args: args{ + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "service", + Protocol: corev1.ProtocolTCP, + Port: 443, + }, + }, + }, + }, + curListenerMapping: map[listenerKey]*listeners.Listener{ + { + Protocol: "tcp", + Port: 443, + }: { + ID: "listenerid", + 
Tags: []string{"test-lb"}, + }, + }, + isLBOwner: true, lbName: "test-lb", + extraPorts: []corev1.ServicePort{ + { + Name: "extra-port", + Protocol: corev1.ProtocolTCP, + Port: 9100, + }, + { + Name: "another-extra-port", + Protocol: corev1.ProtocolTCP, + Port: 9100, + }, + }, }, - wantErr: false, + wantErr: errors.New("some extraPorts (extra-port/another-extra-port) are in conflicts: 9100/TCP"), }, } for _, tt := range tests { @@ -700,9 +822,9 @@ func TestLbaasV2_checkListenerPorts(t *testing.T) { lbaas := &LbaasV2{ LoadBalancer: LoadBalancer{}, } - err := lbaas.checkListenerPorts(tt.args.service, tt.args.curListenerMapping, tt.args.isLBOwner, tt.args.lbName) - if tt.wantErr == true { - assert.ErrorContains(t, err, "already exists") + err := lbaas.checkListenerPorts(tt.args.service, tt.args.curListenerMapping, tt.args.isLBOwner, tt.args.lbName, tt.args.extraPorts) + if tt.wantErr != nil { + assert.ErrorContains(t, err, tt.wantErr.Error()) } else { assert.NoError(t, err) } @@ -2675,3 +2797,82 @@ func Test_getProxyProtocolFromServiceAnnotation(t *testing.T) { }) } } + +func Test_getStringArrayFromServiceAnnotationSeparateByComma(t *testing.T) { + type args struct { + service *corev1.Service + annotationKey string + defaultSetting []string + } + tests := []struct { + name string + args args + want []string + }{ + { + name: "ensure string is well split", + args: struct { + service *corev1.Service + annotationKey string + defaultSetting []string + }{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"my-csv-annotation": "10.0.0.0/8, my-string-data"}, + }, + }, + annotationKey: "my-csv-annotation", + defaultSetting: []string{"10.0.0.0/8"}}, + want: []string{"10.0.0.0/8", "my-string-data"}, + }, + { + name: "ensure default is return when annotation doesn't exist", + args: struct { + service *corev1.Service + annotationKey string + defaultSetting []string + }{ + service: &corev1.Service{}, + annotationKey: "my-csv-annotation", + 
defaultSetting: []string{"10.0.0.0/8"}}, + want: []string{"10.0.0.0/8"}, + }, + { + name: "ensure empty array is returned when annotation has blank chars", + args: struct { + service *corev1.Service + annotationKey string + defaultSetting []string + }{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"my-csv-annotation": " , "}, + }, + }, + annotationKey: "my-csv-annotation", + defaultSetting: []string{"10.0.0.0/8"}}, + want: []string{}, + }, + { + name: "ensure empty array is returned when annotation is empty", + args: struct { + service *corev1.Service + annotationKey string + defaultSetting []string + }{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"my-csv-annotation": ""}, + }, + }, + annotationKey: "my-csv-annotation", + defaultSetting: []string{"10.0.0.0/8"}}, + want: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, getStringArrayFromServiceAnnotationSeparatedByComma(tt.args.service, tt.args.annotationKey, tt.args.defaultSetting), "getStringArrayFromServiceAnnotationSeparatedByComma(%v, %v, %v)", tt.args.service, tt.args.annotationKey, tt.args.defaultSetting) + }) + } +} diff --git a/pkg/util/openstack/loadbalancer.go b/pkg/util/openstack/loadbalancer.go index a9225fff44..e58a13d795 100644 --- a/pkg/util/openstack/loadbalancer.go +++ b/pkg/util/openstack/loadbalancer.go @@ -40,12 +40,13 @@ import ( ) const ( - OctaviaFeatureTags = 0 - OctaviaFeatureVIPACL = 1 - OctaviaFeatureFlavors = 2 - OctaviaFeatureTimeout = 3 - OctaviaFeatureAvailabilityZones = 4 - OctaviaFeatureHTTPMonitorsOnUDP = 5 + OctaviaFeatureTags = 0 + OctaviaFeatureVIPACL = 1 + OctaviaFeatureFlavors = 2 + OctaviaFeatureTimeout = 3 + OctaviaFeatureAvailabilityZones = 4 + OctaviaFeatureHTTPMonitorsOnUDP = 5 + OctaviaFeaturePrometheusListener = 6 waitLoadbalancerInitDelay = 1 * time.Second waitLoadbalancerFactor = 1.2 @@ -145,6 +146,14 @@ func 
IsOctaviaFeatureSupported(ctx context.Context, client *gophercloud.ServiceC if currentVer.GreaterThanOrEqual(verHTTPMonitorsOnUDP) { return true } + case OctaviaFeaturePrometheusListener: + if lbProvider == "ovn" { + return false + } + verACL, _ := version.NewVersion("v2.25") + if currentVer.GreaterThanOrEqual(verACL) { + return true + } default: klog.Warningf("Feature %d not recognized", feature) } diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index 4a6cb7f615..8b7832b736 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -47,3 +47,70 @@ func TestStringToMap(t *testing.T) { }) } } + +func TestSplitTrim(t *testing.T) { + type args struct { + s string + sep rune + } + tests := []struct { + name string + args args + want []string + }{ + { + name: "csv style", + args: args{ + s: "10.0.0.0/8, my-string-data", + sep: ',', + }, + want: []string{ + "10.0.0.0/8", + "my-string-data", + }, + }, + { + name: "csv with (with a trim space separation)", + args: args{ + s: "10.0.0.0/8 my-string-data", + sep: ',', + }, + want: []string{ + "10.0.0.0/8", + "my-string-data", + }, + }, + { + name: "double comma", + args: args{ + s: ",10.0.0.0/8, , 192.168.0.0/24,,", + sep: ',', + }, + want: []string{ + "10.0.0.0/8", + "192.168.0.0/24", + }, + }, + { + name: "empty string with comma", + args: args{ + s: " , ", + sep: ',', + }, + want: []string{}, + }, + { + name: "empty string", + args: args{ + s: "", + sep: ',', + }, + want: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, SplitTrim(tt.args.s, tt.args.sep), "SplitTrim(%v, %v)", tt.args.s, tt.args.sep) + }) + } +} diff --git a/tests/e2e/cloudprovider/test-lb-service.sh b/tests/e2e/cloudprovider/test-lb-service.sh index cb57d6ec8c..ce8174ec6b 100755 --- a/tests/e2e/cloudprovider/test-lb-service.sh +++ b/tests/e2e/cloudprovider/test-lb-service.sh @@ -370,12 +370,12 @@ EOF wait_for_service_address ${service} printf "\n>>>>>>> Validating openstack 
load balancer \n" - lbid=$(openstack loadbalancer list -c id -c name | grep "octavia-lb-test_${service}" | awk '{print $2}') - if [[ -z $lbid ]]; then + lbID=$(openstack loadbalancer list -c id -c name | grep "octavia-lb-test_${service}" | awk '{print $2}') + if [[ -z $lbID ]]; then printf "\n>>>>>>> FAIL: Load balancer not found for Service ${service}\n" exit 1 fi - lb_info=$(openstack loadbalancer status show $lbid) + lb_info=$(openstack loadbalancer status show $lbID) listener_count=$(echo $lb_info | jq '.loadbalancer.listeners | length') member_ports=$(echo $lb_info | jq '.loadbalancer.listeners | .[].pools | .[].members | .[].protocol_port' | uniq | tr '\n' ' ') service_nodeports=$(kubectl -n $NAMESPACE get svc $service -o json | jq '.spec.ports | .[].nodePort' | tr '\n' ' ') @@ -395,11 +395,11 @@ EOF printf "\n>>>>>>> Removing port2 and update NodePort of port1.\n" kubectl -n $NAMESPACE patch svc $service --type json -p '[{"op": "remove","path": "/spec/ports/1"},{"op": "remove","path": "/spec/ports/0/nodePort"}]' - printf "\n>>>>>>> Waiting for load balancer $lbid ACTIVE.\n" - wait_for_loadbalancer $lbid + printf "\n>>>>>>> Waiting for load balancer $lbID ACTIVE.\n" + wait_for_loadbalancer $lbID printf "\n>>>>>>> Validating openstack load balancer after updating the service.\n" - lb_info=$(openstack loadbalancer status show $lbid) + lb_info=$(openstack loadbalancer status show $lbID) listener_count=$(echo $lb_info | jq '.loadbalancer.listeners | length') member_port=$(echo $lb_info | jq '.loadbalancer.listeners | .[].pools | .[].members | .[].protocol_port' | uniq) service_nodeport=$(kubectl -n $NAMESPACE get svc $service -o json | jq '.spec.ports | .[].nodePort') @@ -801,6 +801,80 @@ EOF fi } +######################################################################## +## Name: test_metric_endpoint +## Desc: Create a k8s service and check the metric endpoint exposition +## Params: None +######################################################################## 
+function test_metric_endpoint { + local service="test-metric-endpoint" + local metric_port="9101" + local allowed_cidrs_expected='"0.0.0.0/0"' + + if [[ ${OCTAVIA_PROVIDER} == "ovn" ]]; then + printf "\n>>>>>>> Skipping Service ${service} test for OVN provider\n" + return 0 + fi + + printf "\n>>>>>>> Create Service ${service}\n" + cat <>>>>>> Waiting for the Service ${service} creation finished\n" + wait_for_service_address ${service} + wait_address_accessible $ipaddr + + lbID=$(openstack loadbalancer list -c id -c name | grep "octavia-lb-test_${service}" | awk '{print $2}') + + openstack loadbalancer status show ${lbID} # Debug purpose + openstack loadbalancer listener show listener_metric_kube_service_kubernetes_octavia-lb-test_test-metric-endpoint # Debug purpose + sleep 60 # sleep to give Octavia some time to start reporting metrics + openstack loadbalancer status show ${lbID} # Debug purpose + openstack loadbalancer listener show listener_metric_kube_service_kubernetes_octavia-lb-test_test-metric-endpoint # Debug purpose + + printf "\n>>>>>>> Sending request to the Metric endpoint ${service}\n" + metricFetch=$(curl -sS http://${ipaddr}:${metric_port}/metrics) + # ensure a metric is returned by the endpoint + if [[ "$metricFetch" == *"octavia_loadbalancer_cpu"* ]]; then + printf "\n>>>>>>> Expected: Get correct response from Service ${service}\n" + else + printf "\n>>>>>>> FAIL: Get incorrect response from Service ${service}, expected: octavia_loadbalancer_cpu, actual: ${metricFetch}\n" + curl -sSv http://${ipaddr}:${metric_port}/metrics + exit 1 + fi + + printf "\n>>>>>>> Checking Metric endpoint's configuration (allowed cidrs)\n" + metricListenerId=$(openstack loadbalancer status show ${lbID} | jq -r '.loadbalancer.listeners[] | select(.name | startswith("listener_metric_kube_service")) | .id') + cidrs=$(openstack loadbalancer listener show ${metricListenerId} -f json | jq '.allowed_cidrs') + # ensure allowed CIDRs are correctly set on the Octavia side + if [[ "$cidrs" ==
"$allowed_cidrs_expected" ]] ; then + printf "\n>>>>>>> Expected: Get correct response from Metric endpoint's configuration\n" + else + printf "\n>>>>>>> FAIL: Get incorrect Metric's configuration, expected: ${allowed_cidrs_expected}, actual: ${cidrs}\n" + exit 1 + fi + + printf "\n>>>>>>> Delete Service ${service}\n" + kubectl -n $NAMESPACE delete service ${service} +} + create_namespace create_deployment set_openstack_credentials @@ -810,3 +884,4 @@ test_forwarded test_update_port test_shared_lb test_shared_user_lb +test_metric_endpoint