@@ -18,6 +18,7 @@ package v1alpha2

import (
	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -120,7 +121,7 @@ type OpenTelemetryCollectorSpec struct {
	OpenTelemetryCommonFields `json:",inline"`
	// TargetAllocator indicates a value which determines whether to spawn a target allocation resource or not.
	// +optional
-	TargetAllocator v1alpha1.OpenTelemetryTargetAllocator `json:"targetAllocator,omitempty"`
+	TargetAllocator TargetAllocatorEmbedded `json:"targetAllocator,omitempty"`
	// Mode represents how the collector should be deployed (deployment, daemonset, statefulset or sidecar)
	// +optional
	Mode Mode `json:"mode,omitempty"`
@@ -165,6 +166,84 @@ type OpenTelemetryCollectorSpec struct {
	DeploymentUpdateStrategy appsv1.DeploymentStrategy `json:"deploymentUpdateStrategy,omitempty"`
}

+// TargetAllocatorEmbedded defines the configuration for the Prometheus target allocator, embedded in the
+// OpenTelemetryCollector spec.
+type TargetAllocatorEmbedded struct {
+	// Replicas is the number of pod instances for the underlying TargetAllocator. This should only be set to a value
+	// other than 1 if a strategy that allows for high availability is chosen. Currently, the only allocation strategy
+	// that can be run in a high availability mode is consistent-hashing.
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty"`
+	// NodeSelector to schedule OpenTelemetry TargetAllocator pods.
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	// Resources to set on the OpenTelemetryTargetAllocator containers.
+	// +optional
+	Resources v1.ResourceRequirements `json:"resources,omitempty"`
+	// AllocationStrategy determines which strategy the target allocator should use for allocation.
+	// The current options are least-weighted, consistent-hashing and per-node. The default is
+	// consistent-hashing.
+	// +optional
+	// +kubebuilder:default:=consistent-hashing
+	AllocationStrategy TargetAllocatorAllocationStrategy `json:"allocationStrategy,omitempty"`
+	// FilterStrategy determines how to filter targets before allocating them among the collectors.
+	// The only current option is relabel-config (drops targets based on prom relabel_config).
+	// The default is relabel-config.
+	// +optional
+	// +kubebuilder:default:=relabel-config
+	FilterStrategy TargetAllocatorFilterStrategy `json:"filterStrategy,omitempty"`
+	// ServiceAccount indicates the name of an existing service account to use with this instance. When set,
+	// the operator will not automatically create a ServiceAccount for the TargetAllocator.
+	// +optional
+	ServiceAccount string `json:"serviceAccount,omitempty"`
+	// Image indicates the container image to use for the OpenTelemetry TargetAllocator.
+	// +optional
+	Image string `json:"image,omitempty"`
+	// Enabled indicates whether to use a target allocation mechanism for Prometheus targets or not.
+	// +optional
+	Enabled bool `json:"enabled,omitempty"`
+	// If specified, indicates the pod's scheduling constraints
+	// +optional
+	Affinity *v1.Affinity `json:"affinity,omitempty"`
+	// PrometheusCR defines the configuration for the retrieval of PrometheusOperator CRDs (servicemonitor.monitoring.coreos.com/v1 and podmonitor.monitoring.coreos.com/v1).
+	// All CR instances which the ServiceAccount has access to will be retrieved. This includes other namespaces.
+	// +optional
+	PrometheusCR TargetAllocatorPrometheusCR `json:"prometheusCR,omitempty"`
+	// SecurityContext configures the container security context for
+	// the targetallocator.
+	// +optional
+	SecurityContext *v1.SecurityContext `json:"securityContext,omitempty"`
+	// PodSecurityContext configures the pod security context for the
+	// targetallocator.
+	// +optional
+	PodSecurityContext *v1.PodSecurityContext `json:"podSecurityContext,omitempty"`
+	// TopologySpreadConstraints embedded kubernetes pod configuration option,
+	// controls how pods are spread across your cluster among failure-domains
+	// such as regions, zones, nodes, and other user-defined topology domains
+	// https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+	// +optional
+	TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+	// Tolerations embedded kubernetes pod configuration option,
+	// controls how pods can be scheduled with matching taints
+	// +optional
+	Tolerations []v1.Toleration `json:"tolerations,omitempty"`
+	// ENV vars to set on the OpenTelemetry TargetAllocator's Pods. These can then in certain cases be
+	// consumed in the config file for the TargetAllocator.
+	// +optional
+	Env []v1.EnvVar `json:"env,omitempty"`
+	// ObservabilitySpec defines how telemetry data gets handled.
+	//
+	// +optional
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Observability"
+	Observability v1alpha1.ObservabilitySpec `json:"observability,omitempty"`
+	// PodDisruptionBudget specifies the pod disruption budget configuration to use
+	// for the target allocator workload.
+	//
+	// +optional
+	PodDisruptionBudget *PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty"`
+}
+
// OpenTelemetryCollectorStatus defines the observed state of OpenTelemetryCollector.
type OpenTelemetryCollectorStatus struct {
	// Scale is the OpenTelemetryCollector's scale subresource status.
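
For illustration only, here is a minimal sketch of how client code might populate the new embedded field once this change lands. It is not part of the diff: the import path, the surrounding OpenTelemetryCollector object, and the string literals passed to Mode and AllocationStrategy are assumptions based on the field docs above, not confirmed API constants.

package main

import (
	"fmt"

	// Assumed import path for the API group shown in this diff.
	"github.com/open-telemetry/opentelemetry-operator/apis/v1alpha2"
)

func main() {
	// Replicas is a *int32, so take the address of a local variable.
	replicas := int32(2)

	collector := v1alpha2.OpenTelemetryCollector{
		Spec: v1alpha2.OpenTelemetryCollectorSpec{
			// "statefulset" is one of the documented modes; the literal assumes Mode has a
			// string underlying type.
			Mode: v1alpha2.Mode("statefulset"),
			TargetAllocator: v1alpha2.TargetAllocatorEmbedded{
				Enabled:  true,
				Replicas: &replicas,
				// Per the field docs above, consistent-hashing is the default and the only
				// strategy that supports running more than one replica.
				AllocationStrategy: v1alpha2.TargetAllocatorAllocationStrategy("consistent-hashing"),
			},
		},
	}

	fmt.Printf("target allocator enabled: %t, replicas: %d\n",
		collector.Spec.TargetAllocator.Enabled, *collector.Spec.TargetAllocator.Replicas)
}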