
Commit c79f185

Add v1alpha2 embedded Target Allocator struct (#2623)
Add and use the embedded Target Allocator struct for v1alpha2. The changes mostly involve the label selectors for the Prometheus CR. Note that this changes the semantics of those selectors: an empty selector now selects nothing, which is in line with the standard but differs from v1alpha1.
1 parent 4d26028 commit c79f185

15 files changed: +393 -104
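
The semantics note in the commit message refers to the Prometheus CR selectors: v1alpha1 stores them as plain label maps, while the v1alpha2 embedded struct uses *metav1.LabelSelector (see internal/api/convert/v1alpha.go below, which wraps the old maps in MatchLabels). The following is a minimal sketch of that shape change, for illustration only; it is not part of this commit, and the "app: frontend" label is hypothetical.

// Illustration only; assumes the v1alpha1/v1alpha2 packages referenced in the hunks below.
package main

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1"
    "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha2"
)

func main() {
    // v1alpha1: the Prometheus CR selectors are plain label maps ("app": "frontend" is a hypothetical label).
    oldCR := v1alpha1.OpenTelemetryTargetAllocatorPrometheusCR{
        PodMonitorSelector: map[string]string{"app": "frontend"},
    }
    // v1alpha2: the selectors are *metav1.LabelSelector. Per the commit message, an empty
    // selector now selects nothing, which differs from the v1alpha1 behaviour.
    newCR := v1alpha2.TargetAllocatorPrometheusCR{
        PodMonitorSelector: &metav1.LabelSelector{
            MatchLabels: oldCR.PodMonitorSelector, // mirrors how the converter carries old values forward
        },
    }
    _ = newCR
}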

apis/v1alpha2/allocation_strategy.go

+1 -1

@@ -31,5 +31,5 @@ const (
     TargetAllocatorAllocationStrategyConsistentHashing TargetAllocatorAllocationStrategy = "consistent-hashing"

     // TargetAllocatorFilterStrategyRelabelConfig targets will be consistently drops targets based on the relabel_config.
-    TargetAllocatorFilterStrategyRelabelConfig TargetAllocatorFilterStrategy = "consistent-hashing"
+    TargetAllocatorFilterStrategyRelabelConfig TargetAllocatorFilterStrategy = "relabel-config"
 )
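
The hunk above is a value fix: the relabel-config filter strategy constant previously carried the "consistent-hashing" string. A tiny illustrative sketch (not part of the commit) of the corrected value:

// Sketch only; shows the constant value after the fix above.
package main

import (
    "fmt"

    "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha2"
)

func main() {
    // Before this commit the constant wrongly held "consistent-hashing"; it now prints "relabel-config".
    fmt.Println(v1alpha2.TargetAllocatorFilterStrategyRelabelConfig)
}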

apis/v1alpha2/opentelemetrycollector_types.go

+80 -1

@@ -18,6 +18,7 @@ package v1alpha2

 import (
     appsv1 "k8s.io/api/apps/v1"
+    v1 "k8s.io/api/core/v1"
     networkingv1 "k8s.io/api/networking/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -120,7 +121,7 @@ type OpenTelemetryCollectorSpec struct {
     OpenTelemetryCommonFields `json:",inline"`
     // TargetAllocator indicates a value which determines whether to spawn a target allocation resource or not.
     // +optional
-    TargetAllocator v1alpha1.OpenTelemetryTargetAllocator `json:"targetAllocator,omitempty"`
+    TargetAllocator TargetAllocatorEmbedded `json:"targetAllocator,omitempty"`
     // Mode represents how the collector should be deployed (deployment, daemonset, statefulset or sidecar)
     // +optional
     Mode Mode `json:"mode,omitempty"`

@@ -165,6 +166,84 @@ type OpenTelemetryCollectorSpec struct {
     DeploymentUpdateStrategy appsv1.DeploymentStrategy `json:"deploymentUpdateStrategy,omitempty"`
 }

+// TargetAllocatorEmbedded defines the configuration for the Prometheus target allocator, embedded in the
+// OpenTelemetryCollector spec.
+type TargetAllocatorEmbedded struct {
+    // Replicas is the number of pod instances for the underlying TargetAllocator. This should only be set to a value
+    // other than 1 if a strategy that allows for high availability is chosen. Currently, the only allocation strategy
+    // that can be run in a high availability mode is consistent-hashing.
+    // +optional
+    Replicas *int32 `json:"replicas,omitempty"`
+    // NodeSelector to schedule OpenTelemetry TargetAllocator pods.
+    // +optional
+    NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+    // Resources to set on the OpenTelemetryTargetAllocator containers.
+    // +optional
+    Resources v1.ResourceRequirements `json:"resources,omitempty"`
+    // AllocationStrategy determines which strategy the target allocator should use for allocation.
+    // The current options are least-weighted, consistent-hashing and per-node. The default is
+    // consistent-hashing.
+    // +optional
+    // +kubebuilder:default:=consistent-hashing
+    AllocationStrategy TargetAllocatorAllocationStrategy `json:"allocationStrategy,omitempty"`
+    // FilterStrategy determines how to filter targets before allocating them among the collectors.
+    // The only current option is relabel-config (drops targets based on prom relabel_config).
+    // The default is relabel-config.
+    // +optional
+    // +kubebuilder:default:=relabel-config
+    FilterStrategy TargetAllocatorFilterStrategy `json:"filterStrategy,omitempty"`
+    // ServiceAccount indicates the name of an existing service account to use with this instance. When set,
+    // the operator will not automatically create a ServiceAccount for the TargetAllocator.
+    // +optional
+    ServiceAccount string `json:"serviceAccount,omitempty"`
+    // Image indicates the container image to use for the OpenTelemetry TargetAllocator.
+    // +optional
+    Image string `json:"image,omitempty"`
+    // Enabled indicates whether to use a target allocation mechanism for Prometheus targets or not.
+    // +optional
+    Enabled bool `json:"enabled,omitempty"`
+    // If specified, indicates the pod's scheduling constraints
+    // +optional
+    Affinity *v1.Affinity `json:"affinity,omitempty"`
+    // PrometheusCR defines the configuration for the retrieval of PrometheusOperator CRDs ( servicemonitor.monitoring.coreos.com/v1 and podmonitor.monitoring.coreos.com/v1 ) retrieval.
+    // All CR instances which the ServiceAccount has access to will be retrieved. This includes other namespaces.
+    // +optional
+    PrometheusCR TargetAllocatorPrometheusCR `json:"prometheusCR,omitempty"`
+    // SecurityContext configures the container security context for
+    // the targetallocator.
+    // +optional
+    SecurityContext *v1.SecurityContext `json:"securityContext,omitempty"`
+    // PodSecurityContext configures the pod security context for the
+    // targetallocator.
+    // +optional
+    PodSecurityContext *v1.PodSecurityContext `json:"podSecurityContext,omitempty"`
+    // TopologySpreadConstraints embedded kubernetes pod configuration option,
+    // controls how pods are spread across your cluster among failure-domains
+    // such as regions, zones, nodes, and other user-defined topology domains
+    // https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+    // +optional
+    TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+    // Toleration embedded kubernetes pod configuration option,
+    // controls how pods can be scheduled with matching taints
+    // +optional
+    Tolerations []v1.Toleration `json:"tolerations,omitempty"`
+    // ENV vars to set on the OpenTelemetry TargetAllocator's Pods. These can then in certain cases be
+    // consumed in the config file for the TargetAllocator.
+    // +optional
+    Env []v1.EnvVar `json:"env,omitempty"`
+    // ObservabilitySpec defines how telemetry data gets handled.
+    //
+    // +optional
+    // +kubebuilder:validation:Optional
+    // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Observability"
+    Observability v1alpha1.ObservabilitySpec `json:"observability,omitempty"`
+    // PodDisruptionBudget specifies the pod disruption budget configuration to use
+    // for the target allocator workload.
+    //
+    // +optional
+    PodDisruptionBudget *PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty"`
+}
+
 // OpenTelemetryCollectorStatus defines the observed state of OpenTelemetryCollector.
 type OpenTelemetryCollectorStatus struct {
     // Scale is the OpenTelemetryCollector's scale subresource status.
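
For orientation, here is a minimal, illustrative sketch (not taken from this commit) of setting the new embedded struct on a v1alpha2 spec; the field values mirror the updated fixtures in controllers/builder_test.go further down.

// Sketch only; field values mirror the updated test fixtures below.
package main

import (
    "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha2"
)

func main() {
    spec := v1alpha2.OpenTelemetryCollectorSpec{
        Mode: "statefulset",
        TargetAllocator: v1alpha2.TargetAllocatorEmbedded{
            Enabled:        true,
            FilterStrategy: "relabel-config",
            PrometheusCR: v1alpha2.TargetAllocatorPrometheusCR{
                Enabled: true,
            },
        },
    }
    _ = spec
}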

apis/v1alpha2/zz_generated.deepcopy.go

+71
Some generated files are not rendered by default.

controllers/builder_test.go

+10 -18

@@ -1150,10 +1150,10 @@ service:
             },
             Mode:   "statefulset",
             Config: goodConfig,
-            TargetAllocator: v1alpha1.OpenTelemetryTargetAllocator{
+            TargetAllocator: v1alpha2.TargetAllocatorEmbedded{
                 Enabled:        true,
                 FilterStrategy: "relabel-config",
-                PrometheusCR: v1alpha1.OpenTelemetryTargetAllocatorPrometheusCR{
+                PrometheusCR: v1alpha2.TargetAllocatorPrometheusCR{
                     Enabled: true,
                 },
             },

@@ -1371,12 +1371,8 @@ label_selector:
   app.kubernetes.io/managed-by: opentelemetry-operator
   app.kubernetes.io/part-of: opentelemetry
 prometheus_cr:
-  pod_monitor_selector:
-    matchlabels: {}
-    matchexpressions: []
-  service_monitor_selector:
-    matchlabels: {}
-    matchexpressions: []
+  pod_monitor_selector: null
+  service_monitor_selector: null
 `,
         },
     },

@@ -1409,7 +1405,7 @@ prometheus_cr:
                 "app.kubernetes.io/version": "latest",
             },
             Annotations: map[string]string{
-                "opentelemetry-targetallocator-config/hash": "51477b182d2c9e7c0db27a2cbc9c7d35b24895b1cf0774d51a41b8d1753696ed",
+                "opentelemetry-targetallocator-config/hash": "59307aaa5652c8723f7803aa2d2b631389d1a0267444a4a8dc559878b5c4aa2c",
             },
         },
         Spec: corev1.PodSpec{

@@ -1546,9 +1542,9 @@ prometheus_cr:
             },
             Mode:   "statefulset",
             Config: goodConfig,
-            TargetAllocator: v1alpha1.OpenTelemetryTargetAllocator{
+            TargetAllocator: v1alpha2.TargetAllocatorEmbedded{
                 Enabled: true,
-                PrometheusCR: v1alpha1.OpenTelemetryTargetAllocatorPrometheusCR{
+                PrometheusCR: v1alpha2.TargetAllocatorPrometheusCR{
                     Enabled: true,
                 },
                 FilterStrategy: "relabel-config",

@@ -1772,12 +1768,8 @@ label_selector:
   app.kubernetes.io/managed-by: opentelemetry-operator
   app.kubernetes.io/part-of: opentelemetry
 prometheus_cr:
-  pod_monitor_selector:
-    matchlabels: {}
-    matchexpressions: []
-  service_monitor_selector:
-    matchlabels: {}
-    matchexpressions: []
+  pod_monitor_selector: null
+  service_monitor_selector: null
 `,
         },
     },

@@ -1810,7 +1802,7 @@ prometheus_cr:
                 "app.kubernetes.io/version": "latest",
             },
             Annotations: map[string]string{
-                "opentelemetry-targetallocator-config/hash": "51477b182d2c9e7c0db27a2cbc9c7d35b24895b1cf0774d51a41b8d1753696ed",
+                "opentelemetry-targetallocator-config/hash": "59307aaa5652c8723f7803aa2d2b631389d1a0267444a4a8dc559878b5c4aa2c",
             },
         },
         Spec: corev1.PodSpec{

controllers/reconcile_test.go

+2

@@ -468,6 +468,8 @@ func TestOpenTelemetryCollectorReconciler_Reconcile(t *testing.T) {
     taConfig["config"] = promConfig["config"]
     taConfig["allocation_strategy"] = "consistent-hashing"
     taConfig["filter_strategy"] = "relabel-config"
+    taConfig["pod_monitor_selector"] = map[string]string{}
+    taConfig["service_monitor_selector"] = map[string]string{}
     taConfig["prometheus_cr"] = map[string]any{
         "scrape_interval": "30s",
         "pod_monitor_selector": &metav1.LabelSelector{},

internal/api/convert/v1alpha.go

+41 -24

@@ -18,6 +18,7 @@ import (
     "errors"

     "gopkg.in/yaml.v3"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

     "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1"
     "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha2"

@@ -93,30 +94,7 @@ func V1Alpha1to2(in v1alpha1.OpenTelemetryCollector) (v1alpha2.OpenTelemetryColl
     out.Spec.OpenTelemetryCommonFields.InitContainers = copy.Spec.InitContainers
     out.Spec.OpenTelemetryCommonFields.AdditionalContainers = copy.Spec.AdditionalContainers

-    out.Spec.TargetAllocator.Replicas = copy.Spec.TargetAllocator.Replicas
-    out.Spec.TargetAllocator.NodeSelector = copy.Spec.TargetAllocator.NodeSelector
-    out.Spec.TargetAllocator.Resources = copy.Spec.TargetAllocator.Resources
-    out.Spec.TargetAllocator.AllocationStrategy = copy.Spec.TargetAllocator.AllocationStrategy
-    out.Spec.TargetAllocator.FilterStrategy = copy.Spec.TargetAllocator.FilterStrategy
-    out.Spec.TargetAllocator.ServiceAccount = copy.Spec.TargetAllocator.ServiceAccount
-    out.Spec.TargetAllocator.Image = copy.Spec.TargetAllocator.Image
-    out.Spec.TargetAllocator.Enabled = copy.Spec.TargetAllocator.Enabled
-    out.Spec.TargetAllocator.Affinity = copy.Spec.TargetAllocator.Affinity
-    out.Spec.TargetAllocator.PrometheusCR.Enabled = copy.Spec.TargetAllocator.PrometheusCR.Enabled
-    out.Spec.TargetAllocator.PrometheusCR.ScrapeInterval = copy.Spec.TargetAllocator.PrometheusCR.ScrapeInterval
-    out.Spec.TargetAllocator.PrometheusCR.PodMonitorSelector = copy.Spec.TargetAllocator.PrometheusCR.PodMonitorSelector
-    out.Spec.TargetAllocator.PrometheusCR.ServiceMonitorSelector = copy.Spec.TargetAllocator.PrometheusCR.ServiceMonitorSelector
-    out.Spec.TargetAllocator.SecurityContext = copy.Spec.TargetAllocator.SecurityContext
-    out.Spec.TargetAllocator.PodSecurityContext = copy.Spec.TargetAllocator.PodSecurityContext
-    out.Spec.TargetAllocator.TopologySpreadConstraints = copy.Spec.TargetAllocator.TopologySpreadConstraints
-    out.Spec.TargetAllocator.Tolerations = copy.Spec.TargetAllocator.Tolerations
-    out.Spec.TargetAllocator.Env = copy.Spec.TargetAllocator.Env
-    out.Spec.TargetAllocator.Observability = v1alpha1.ObservabilitySpec{
-        Metrics: v1alpha1.MetricsConfigSpec{
-            EnableMetrics: copy.Spec.TargetAllocator.Observability.Metrics.EnableMetrics,
-        },
-    }
-    out.Spec.TargetAllocator.PodDisruptionBudget = copy.Spec.TargetAllocator.PodDisruptionBudget
+    out.Spec.TargetAllocator = TargetAllocatorEmbedded(copy.Spec.TargetAllocator)

     out.Spec.Mode = v1alpha2.Mode(copy.Spec.Mode)
     out.Spec.UpgradeStrategy = v1alpha2.UpgradeStrategy(copy.Spec.UpgradeStrategy)

@@ -148,3 +126,42 @@ func V1Alpha1to2(in v1alpha1.OpenTelemetryCollector) (v1alpha2.OpenTelemetryColl

     return out, nil
 }
+
+func TargetAllocatorEmbedded(in v1alpha1.OpenTelemetryTargetAllocator) v1alpha2.TargetAllocatorEmbedded {
+    out := v1alpha2.TargetAllocatorEmbedded{}
+    out.Replicas = in.Replicas
+    out.NodeSelector = in.NodeSelector
+    out.Resources = in.Resources
+    out.AllocationStrategy = v1alpha2.TargetAllocatorAllocationStrategy(in.AllocationStrategy)
+    out.FilterStrategy = v1alpha2.TargetAllocatorFilterStrategy(in.FilterStrategy)
+    out.ServiceAccount = in.ServiceAccount
+    out.Image = in.Image
+    out.Enabled = in.Enabled
+    out.Affinity = in.Affinity
+    out.PrometheusCR.Enabled = in.PrometheusCR.Enabled
+    out.PrometheusCR.ScrapeInterval = in.PrometheusCR.ScrapeInterval
+    out.SecurityContext = in.SecurityContext
+    out.PodSecurityContext = in.PodSecurityContext
+    out.TopologySpreadConstraints = in.TopologySpreadConstraints
+    out.Tolerations = in.Tolerations
+    out.Env = in.Env
+    out.Observability = v1alpha1.ObservabilitySpec{
+        Metrics: v1alpha1.MetricsConfigSpec{
+            EnableMetrics: in.Observability.Metrics.EnableMetrics,
+        },
+    }
+
+    out.PrometheusCR.PodMonitorSelector = &metav1.LabelSelector{
+        MatchLabels: in.PrometheusCR.PodMonitorSelector,
+    }
+    out.PrometheusCR.ServiceMonitorSelector = &metav1.LabelSelector{
+        MatchLabels: in.PrometheusCR.ServiceMonitorSelector,
+    }
+    if in.PodDisruptionBudget != nil {
+        out.PodDisruptionBudget = &v1alpha2.PodDisruptionBudgetSpec{
+            MinAvailable:   in.PodDisruptionBudget.MinAvailable,
+            MaxUnavailable: in.PodDisruptionBudget.MaxUnavailable,
+        }
+    }
+    return out
+}
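
For reference, a hedged usage sketch of the new conversion helper: the wrapper function name exampleConversion and the "team: observability" label are hypothetical, and the snippet is written as if it lived alongside the convert package code above (which already imports v1alpha1, v1alpha2, and metav1).

// Sketch only; shows that the old map-based selectors come out wrapped in MatchLabels.
func exampleConversion() v1alpha2.TargetAllocatorEmbedded {
    in := v1alpha1.OpenTelemetryTargetAllocator{
        Enabled: true,
        PrometheusCR: v1alpha1.OpenTelemetryTargetAllocatorPrometheusCR{
            Enabled:            true,
            PodMonitorSelector: map[string]string{"team": "observability"}, // hypothetical label
        },
    }
    out := TargetAllocatorEmbedded(in)
    // out.PrometheusCR.PodMonitorSelector is now &metav1.LabelSelector{MatchLabels: map[string]string{"team": "observability"}}
    return out
}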
