
Commit b327261

Add Observability field to Target Allocator CRD

1 parent 5bc5b75
File tree

4 files changed: +228 -2 lines

apis/v1beta1/targetallocator_types.go (+6)

@@ -82,6 +82,12 @@ type TargetAllocatorSpec struct {
 	// PrometheusCR defines the configuration for the retrieval of PrometheusOperator CRDs ( servicemonitor.monitoring.coreos.com/v1 and podmonitor.monitoring.coreos.com/v1 ).
 	// +optional
 	PrometheusCR TargetAllocatorPrometheusCR `json:"prometheusCR,omitempty"`
+	// ObservabilitySpec defines how telemetry data gets handled.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Observability"
+	Observability ObservabilitySpec `json:"observability,omitempty"`
 }
 
 // TargetAllocatorPrometheusCR configures Prometheus CustomResource handling in the Target Allocator.
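
The new field reuses the existing v1beta1.ObservabilitySpec and v1beta1.MetricsConfigSpec types; their shape below is inferred only from how the test added in this commit uses them. A minimal sketch of populating the field on a TargetAllocatorSpec:

	// Sketch only: enable the Target Allocator's self-telemetry via the new field.
	taSpec := v1beta1.TargetAllocatorSpec{
		Observability: v1beta1.ObservabilitySpec{
			Metrics: v1beta1.MetricsConfigSpec{
				// Assumed semantics: ask the operator to scrape the TA's own
				// metrics (e.g. by creating a ServiceMonitor for it).
				EnableMetrics: true,
			},
		},
	}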

apis/v1beta1/zz_generated.deepcopy.go (+1)

Generated file; diff not rendered by default.

internal/manifests/collector/targetallocator.go (+4 -2)

@@ -57,20 +57,22 @@ func TargetAllocator(params manifests.Params) (*v1beta1.TargetAllocator, error)
 				NodeSelector: taSpec.NodeSelector,
 				Resources: taSpec.Resources,
 				ServiceAccount: taSpec.ServiceAccount,
-				SecurityContext: taSpec.SecurityContext,
-				PodSecurityContext: taSpec.PodSecurityContext,
 				Image: taSpec.Image,
 				Affinity: taSpec.Affinity,
+				SecurityContext: taSpec.SecurityContext,
+				PodSecurityContext: taSpec.PodSecurityContext,
 				TopologySpreadConstraints: taSpec.TopologySpreadConstraints,
 				Tolerations: taSpec.Tolerations,
 				Env: taSpec.Env,
 				PodAnnotations: params.OtelCol.Spec.PodAnnotations,
+				PodDisruptionBudget: taSpec.PodDisruptionBudget,
 			},
 			CollectorSelector: collectorSelector,
 			AllocationStrategy: taSpec.AllocationStrategy,
 			FilterStrategy: taSpec.FilterStrategy,
 			ScrapeConfigs: scrapeConfigs,
 			PrometheusCR: taSpec.PrometheusCR,
+			Observability: taSpec.Observability,
 		},
 	}, nil
 }
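
Because the change is plain field propagation, a unit check can compare input and output specs directly; a sketch, assuming params is a manifests.Params built by the test harness as in the table-driven test below:

	// Sketch only: the Observability block set on the collector's embedded
	// target allocator spec should be copied verbatim onto the generated CR.
	ta, err := TargetAllocator(params)
	require.NoError(t, err)
	assert.Equal(t, params.OtelCol.Spec.TargetAllocator.Observability, ta.Spec.Observability)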

internal/manifests/collector/targetallocator_test.go (+217)

@@ -17,10 +17,14 @@ package collector
 import (
 	"fmt"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 
 	"github.com/open-telemetry/opentelemetry-operator/apis/v1beta1"
 	"github.com/open-telemetry/opentelemetry-operator/internal/manifests"
@@ -38,6 +42,11 @@ func TestTargetAllocator(t *testing.T) {
 			"label_key": "label_value",
 		},
 	}
+	replicas := int32(2)
+	runAsNonRoot := true
+	privileged := true
+	runAsUser := int64(1337)
+	runasGroup := int64(1338)
 	otelcolConfig := v1beta1.Config{
 		Receivers: v1beta1.AnyConfig{
 			Object: map[string]interface{}{
@@ -88,6 +97,214 @@ func TestTargetAllocator(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "full",
+			input: v1beta1.OpenTelemetryCollector{
+				ObjectMeta: objectMetadata,
+				Spec: v1beta1.OpenTelemetryCollectorSpec{
+					TargetAllocator: v1beta1.TargetAllocatorEmbedded{
+						Replicas: &replicas,
+						NodeSelector: map[string]string{"key": "value"},
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU: resource.MustParse("500m"),
+								v1.ResourceMemory: resource.MustParse("128Mi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU: resource.MustParse("500m"),
+								v1.ResourceMemory: resource.MustParse("128Mi"),
+							},
+						},
+						AllocationStrategy: v1beta1.TargetAllocatorAllocationStrategyConsistentHashing,
+						FilterStrategy: "relabel-config",
+						ServiceAccount: "serviceAccountName",
+						Image: "custom_image",
+						Enabled: true,
+						Affinity: &v1.Affinity{
+							NodeAffinity: &v1.NodeAffinity{
+								RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+									NodeSelectorTerms: []v1.NodeSelectorTerm{
+										{
+											MatchExpressions: []v1.NodeSelectorRequirement{
+												{
+													Key: "node",
+													Operator: v1.NodeSelectorOpIn,
+													Values: []string{"test-node"},
+												},
+											},
+										},
+									},
+								},
+							},
+						},
+						PrometheusCR: v1beta1.TargetAllocatorPrometheusCR{
+							Enabled: true,
+							ScrapeInterval: &metav1.Duration{Duration: time.Second},
+							PodMonitorSelector: &metav1.LabelSelector{
+								MatchLabels: map[string]string{"podmonitorkey": "podmonitorvalue"},
+							},
+							ServiceMonitorSelector: &metav1.LabelSelector{
+								MatchLabels: map[string]string{"servicemonitorkey": "servicemonitorkey"},
+							},
+						},
+						PodSecurityContext: &v1.PodSecurityContext{
+							RunAsNonRoot: &runAsNonRoot,
+							RunAsUser: &runAsUser,
+							RunAsGroup: &runasGroup,
+						},
+						SecurityContext: &v1.SecurityContext{
+							RunAsUser: &runAsUser,
+							Privileged: &privileged,
+						},
+						TopologySpreadConstraints: []v1.TopologySpreadConstraint{
+							{
+								MaxSkew: 1,
+								TopologyKey: "kubernetes.io/hostname",
+								WhenUnsatisfiable: "DoNotSchedule",
+								LabelSelector: &metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										"foo": "bar",
+									},
+								},
+							},
+						},
+						Tolerations: []v1.Toleration{
+							{
+								Key: "hii",
+								Value: "greeting",
+								Effect: "NoSchedule",
+							},
+						},
+						Env: []v1.EnvVar{
+							{
+								Name: "POD_NAME",
+								ValueFrom: &v1.EnvVarSource{
+									FieldRef: &v1.ObjectFieldSelector{
+										FieldPath: "metadata.name",
+									},
+								},
+							},
+						},
+						Observability: v1beta1.ObservabilitySpec{
+							Metrics: v1beta1.MetricsConfigSpec{
+								EnableMetrics: true,
+							},
+						},
+						PodDisruptionBudget: &v1beta1.PodDisruptionBudgetSpec{
+							MaxUnavailable: &intstr.IntOrString{
+								Type: intstr.Int,
+								IntVal: 1,
+							},
+						},
+					},
+					Config: otelcolConfig,
+				},
+			},
+			want: &v1beta1.TargetAllocator{
+				ObjectMeta: objectMetadata,
+				Spec: v1beta1.TargetAllocatorSpec{
+					OpenTelemetryCommonFields: v1beta1.OpenTelemetryCommonFields{
+						Replicas: &replicas,
+						NodeSelector: map[string]string{"key": "value"},
+						Resources: v1.ResourceRequirements{
+							Limits: v1.ResourceList{
+								v1.ResourceCPU: resource.MustParse("500m"),
+								v1.ResourceMemory: resource.MustParse("128Mi"),
+							},
+							Requests: v1.ResourceList{
+								v1.ResourceCPU: resource.MustParse("500m"),
+								v1.ResourceMemory: resource.MustParse("128Mi"),
+							},
+						},
+						ServiceAccount: "serviceAccountName",
+						Image: "custom_image",
+						Affinity: &v1.Affinity{
+							NodeAffinity: &v1.NodeAffinity{
+								RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+									NodeSelectorTerms: []v1.NodeSelectorTerm{
+										{
+											MatchExpressions: []v1.NodeSelectorRequirement{
+												{
+													Key: "node",
+													Operator: v1.NodeSelectorOpIn,
+													Values: []string{"test-node"},
+												},
+											},
+										},
+									},
+								},
+							},
+						},
+						PodSecurityContext: &v1.PodSecurityContext{
+							RunAsNonRoot: &runAsNonRoot,
+							RunAsUser: &runAsUser,
+							RunAsGroup: &runasGroup,
+						},
+						SecurityContext: &v1.SecurityContext{
+							RunAsUser: &runAsUser,
+							Privileged: &privileged,
+						},
+						TopologySpreadConstraints: []v1.TopologySpreadConstraint{
+							{
+								MaxSkew: 1,
+								TopologyKey: "kubernetes.io/hostname",
+								WhenUnsatisfiable: "DoNotSchedule",
+								LabelSelector: &metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										"foo": "bar",
+									},
+								},
+							},
+						},
+						Tolerations: []v1.Toleration{
+							{
+								Key: "hii",
+								Value: "greeting",
+								Effect: "NoSchedule",
+							},
+						},
+						Env: []v1.EnvVar{
+							{
+								Name: "POD_NAME",
+								ValueFrom: &v1.EnvVarSource{
+									FieldRef: &v1.ObjectFieldSelector{
+										FieldPath: "metadata.name",
+									},
+								},
+							},
+						},
+
+						PodDisruptionBudget: &v1beta1.PodDisruptionBudgetSpec{
+							MaxUnavailable: &intstr.IntOrString{
+								Type: intstr.Int,
+								IntVal: 1,
+							},
+						},
+					},
+					CollectorSelector: metav1.LabelSelector{
+						MatchLabels: manifestutils.SelectorLabels(objectMetadata, ComponentOpenTelemetryCollector),
+					},
+					AllocationStrategy: v1beta1.TargetAllocatorAllocationStrategyConsistentHashing,
+					FilterStrategy: v1beta1.TargetAllocatorFilterStrategyRelabelConfig,
+					PrometheusCR: v1beta1.TargetAllocatorPrometheusCR{
+						Enabled: true,
+						ScrapeInterval: &metav1.Duration{Duration: time.Second},
+						PodMonitorSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{"podmonitorkey": "podmonitorvalue"},
+						},
+						ServiceMonitorSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{"servicemonitorkey": "servicemonitorkey"},
+						},
+					},
+					ScrapeConfigs: []v1beta1.AnyConfig{},
+					Observability: v1beta1.ObservabilitySpec{
+						Metrics: v1beta1.MetricsConfigSpec{
+							EnableMetrics: true,
+						},
+					},
+				},
+			},
+		},
 	}
 
 	for _, testCase := range testCases {
