Add v1alpha2 embedded Target Allocator struct #2623

Merged
2 changes: 1 addition & 1 deletion apis/v1alpha2/allocation_strategy.go
@@ -31,5 +31,5 @@ const (
TargetAllocatorAllocationStrategyConsistentHashing TargetAllocatorAllocationStrategy = "consistent-hashing"

// TargetAllocatorFilterStrategyRelabelConfig drops targets consistently based on the relabel_config.
TargetAllocatorFilterStrategyRelabelConfig TargetAllocatorFilterStrategy = "consistent-hashing"
TargetAllocatorFilterStrategyRelabelConfig TargetAllocatorFilterStrategy = "relabel-config"
Contributor Author

This is a small bug fix, the value was simply wrong.

)
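For reference, a tiny hedged sketch (hypothetical main package; import path as used elsewhere in this PR) of what the corrected constant now evaluates to:

package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-operator/apis/v1alpha2"
)

func main() {
	// After this fix the filter-strategy constant carries the intended value
	// instead of duplicating the consistent-hashing allocation strategy.
	fmt.Println(v1alpha2.TargetAllocatorFilterStrategyRelabelConfig) // relabel-config
}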
81 changes: 80 additions & 1 deletion apis/v1alpha2/opentelemetrycollector_types.go
@@ -18,6 +18,7 @@ package v1alpha2

import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -120,7 +121,7 @@ type OpenTelemetryCollectorSpec struct {
OpenTelemetryCommonFields `json:",inline"`
// TargetAllocator indicates a value which determines whether to spawn a target allocation resource or not.
// +optional
TargetAllocator v1alpha1.OpenTelemetryTargetAllocator `json:"targetAllocator,omitempty"`
TargetAllocator TargetAllocatorEmbedded `json:"targetAllocator,omitempty"`
// Mode represents how the collector should be deployed (deployment, daemonset, statefulset or sidecar)
// +optional
Mode Mode `json:"mode,omitempty"`
@@ -165,6 +166,84 @@ type OpenTelemetryCollectorSpec struct {
DeploymentUpdateStrategy appsv1.DeploymentStrategy `json:"deploymentUpdateStrategy,omitempty"`
}

// TargetAllocatorEmbedded defines the configuration for the Prometheus target allocator, embedded in the
// OpenTelemetryCollector spec.
type TargetAllocatorEmbedded struct {
// Replicas is the number of pod instances for the underlying TargetAllocator. This should only be set to a value
// other than 1 if a strategy that allows for high availability is chosen. Currently, the only allocation strategy
// that can be run in a high availability mode is consistent-hashing.
// +optional
Replicas *int32 `json:"replicas,omitempty"`
// NodeSelector to schedule OpenTelemetry TargetAllocator pods.
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Resources to set on the OpenTelemetryTargetAllocator containers.
// +optional
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// AllocationStrategy determines which strategy the target allocator should use for allocation.
// The current options are least-weighted, consistent-hashing and per-node. The default is
// consistent-hashing.
// +optional
// +kubebuilder:default:=consistent-hashing
AllocationStrategy TargetAllocatorAllocationStrategy `json:"allocationStrategy,omitempty"`
// FilterStrategy determines how to filter targets before allocating them among the collectors.
// The only current option is relabel-config (drops targets based on prom relabel_config).
// The default is relabel-config.
// +optional
// +kubebuilder:default:=relabel-config
FilterStrategy TargetAllocatorFilterStrategy `json:"filterStrategy,omitempty"`
// ServiceAccount indicates the name of an existing service account to use with this instance. When set,
// the operator will not automatically create a ServiceAccount for the TargetAllocator.
// +optional
ServiceAccount string `json:"serviceAccount,omitempty"`
// Image indicates the container image to use for the OpenTelemetry TargetAllocator.
// +optional
Image string `json:"image,omitempty"`
// Enabled indicates whether to use a target allocation mechanism for Prometheus targets or not.
// +optional
Enabled bool `json:"enabled,omitempty"`
// If specified, indicates the pod's scheduling constraints
// +optional
Affinity *v1.Affinity `json:"affinity,omitempty"`
// PrometheusCR defines the configuration for the retrieval of PrometheusOperator CRDs ( servicemonitor.monitoring.coreos.com/v1 and podmonitor.monitoring.coreos.com/v1 ).
// All CR instances which the ServiceAccount has access to will be retrieved. This includes other namespaces.
// +optional
PrometheusCR TargetAllocatorPrometheusCR `json:"prometheusCR,omitempty"`
// SecurityContext configures the container security context for
// the targetallocator.
// +optional
SecurityContext *v1.SecurityContext `json:"securityContext,omitempty"`
// PodSecurityContext configures the pod security context for the
// targetallocator.
// +optional
PodSecurityContext *v1.PodSecurityContext `json:"podSecurityContext,omitempty"`
// TopologySpreadConstraints embedded kubernetes pod configuration option,
// controls how pods are spread across your cluster among failure-domains
// such as regions, zones, nodes, and other user-defined topology domains
// https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
// +optional
TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
// Tolerations embedded kubernetes pod configuration option,
// controls how pods can be scheduled with matching taints
// +optional
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
// ENV vars to set on the OpenTelemetry TargetAllocator's Pods. These can then in certain cases be
// consumed in the config file for the TargetAllocator.
// +optional
Env []v1.EnvVar `json:"env,omitempty"`
// ObservabilitySpec defines how telemetry data gets handled.
//
// +optional
// +kubebuilder:validation:Optional
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Observability"
Observability v1alpha1.ObservabilitySpec `json:"observability,omitempty"`
// PodDisruptionBudget specifies the pod disruption budget configuration to use
// for the target allocator workload.
//
// +optional
PodDisruptionBudget *PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty"`
}

// OpenTelemetryCollectorStatus defines the observed state of OpenTelemetryCollector.
type OpenTelemetryCollectorStatus struct {
// Scale is the OpenTelemetryCollector's scale subresource status.
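For orientation, a minimal hedged sketch (hypothetical main package; the values are invented, while the field names and constants come from the diffs in this PR) of how the new embedded struct is populated on a v1alpha2 spec:

package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-operator/apis/v1alpha2"
)

func main() {
	replicas := int32(2) // values above 1 are only meaningful with consistent-hashing

	spec := v1alpha2.OpenTelemetryCollectorSpec{
		Mode: "statefulset",
		TargetAllocator: v1alpha2.TargetAllocatorEmbedded{
			Enabled:            true,
			Replicas:           &replicas,
			AllocationStrategy: v1alpha2.TargetAllocatorAllocationStrategyConsistentHashing,
			FilterStrategy:     v1alpha2.TargetAllocatorFilterStrategyRelabelConfig,
			PrometheusCR: v1alpha2.TargetAllocatorPrometheusCR{
				Enabled: true,
			},
		},
	}

	fmt.Println(spec.TargetAllocator.AllocationStrategy, spec.TargetAllocator.FilterStrategy)
}

Leaving AllocationStrategy and FilterStrategy unset would instead rely on the kubebuilder defaults declared on the struct (consistent-hashing and relabel-config).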
71 changes: 71 additions & 0 deletions apis/v1alpha2/zz_generated.deepcopy.go

Some generated files are not rendered by default.

28 changes: 10 additions & 18 deletions controllers/builder_test.go
@@ -1150,10 +1150,10 @@ service:
},
Mode: "statefulset",
Config: goodConfig,
TargetAllocator: v1alpha1.OpenTelemetryTargetAllocator{
TargetAllocator: v1alpha2.TargetAllocatorEmbedded{
Enabled: true,
FilterStrategy: "relabel-config",
PrometheusCR: v1alpha1.OpenTelemetryTargetAllocatorPrometheusCR{
PrometheusCR: v1alpha2.TargetAllocatorPrometheusCR{
Enabled: true,
},
},
@@ -1371,12 +1371,8 @@ label_selector:
app.kubernetes.io/managed-by: opentelemetry-operator
app.kubernetes.io/part-of: opentelemetry
prometheus_cr:
pod_monitor_selector:
matchlabels: {}
matchexpressions: []
service_monitor_selector:
matchlabels: {}
matchexpressions: []
pod_monitor_selector: null
service_monitor_selector: null
`,
},
},
@@ -1409,7 +1405,7 @@ prometheus_cr:
"app.kubernetes.io/version": "latest",
},
Annotations: map[string]string{
"opentelemetry-targetallocator-config/hash": "51477b182d2c9e7c0db27a2cbc9c7d35b24895b1cf0774d51a41b8d1753696ed",
"opentelemetry-targetallocator-config/hash": "59307aaa5652c8723f7803aa2d2b631389d1a0267444a4a8dc559878b5c4aa2c",
},
},
Spec: corev1.PodSpec{
@@ -1546,9 +1542,9 @@ prometheus_cr:
},
Mode: "statefulset",
Config: goodConfig,
TargetAllocator: v1alpha1.OpenTelemetryTargetAllocator{
TargetAllocator: v1alpha2.TargetAllocatorEmbedded{
Enabled: true,
PrometheusCR: v1alpha1.OpenTelemetryTargetAllocatorPrometheusCR{
PrometheusCR: v1alpha2.TargetAllocatorPrometheusCR{
Enabled: true,
},
FilterStrategy: "relabel-config",
@@ -1772,12 +1768,8 @@ label_selector:
app.kubernetes.io/managed-by: opentelemetry-operator
app.kubernetes.io/part-of: opentelemetry
prometheus_cr:
pod_monitor_selector:
matchlabels: {}
matchexpressions: []
service_monitor_selector:
matchlabels: {}
matchexpressions: []
pod_monitor_selector: null
service_monitor_selector: null
`,
},
},
@@ -1810,7 +1802,7 @@ prometheus_cr:
"app.kubernetes.io/version": "latest",
},
Annotations: map[string]string{
"opentelemetry-targetallocator-config/hash": "51477b182d2c9e7c0db27a2cbc9c7d35b24895b1cf0774d51a41b8d1753696ed",
"opentelemetry-targetallocator-config/hash": "59307aaa5652c8723f7803aa2d2b631389d1a0267444a4a8dc559878b5c4aa2c",
},
},
Spec: corev1.PodSpec{
2 changes: 2 additions & 0 deletions controllers/reconcile_test.go
@@ -468,6 +468,8 @@ func TestOpenTelemetryCollectorReconciler_Reconcile(t *testing.T) {
taConfig["config"] = promConfig["config"]
taConfig["allocation_strategy"] = "consistent-hashing"
taConfig["filter_strategy"] = "relabel-config"
taConfig["pod_monitor_selector"] = map[string]string{}
taConfig["service_monitor_selector"] = map[string]string{}
taConfig["prometheus_cr"] = map[string]any{
"scrape_interval": "30s",
"pod_monitor_selector": &metav1.LabelSelector{},
65 changes: 41 additions & 24 deletions internal/api/convert/v1alpha.go
@@ -18,6 +18,7 @@ import (
"errors"

"gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1"
"github.com/open-telemetry/opentelemetry-operator/apis/v1alpha2"
@@ -93,30 +94,7 @@ func V1Alpha1to2(in v1alpha1.OpenTelemetryCollector) (v1alpha2.OpenTelemetryColl
out.Spec.OpenTelemetryCommonFields.InitContainers = copy.Spec.InitContainers
out.Spec.OpenTelemetryCommonFields.AdditionalContainers = copy.Spec.AdditionalContainers

out.Spec.TargetAllocator.Replicas = copy.Spec.TargetAllocator.Replicas
out.Spec.TargetAllocator.NodeSelector = copy.Spec.TargetAllocator.NodeSelector
out.Spec.TargetAllocator.Resources = copy.Spec.TargetAllocator.Resources
out.Spec.TargetAllocator.AllocationStrategy = copy.Spec.TargetAllocator.AllocationStrategy
out.Spec.TargetAllocator.FilterStrategy = copy.Spec.TargetAllocator.FilterStrategy
out.Spec.TargetAllocator.ServiceAccount = copy.Spec.TargetAllocator.ServiceAccount
out.Spec.TargetAllocator.Image = copy.Spec.TargetAllocator.Image
out.Spec.TargetAllocator.Enabled = copy.Spec.TargetAllocator.Enabled
out.Spec.TargetAllocator.Affinity = copy.Spec.TargetAllocator.Affinity
out.Spec.TargetAllocator.PrometheusCR.Enabled = copy.Spec.TargetAllocator.PrometheusCR.Enabled
out.Spec.TargetAllocator.PrometheusCR.ScrapeInterval = copy.Spec.TargetAllocator.PrometheusCR.ScrapeInterval
out.Spec.TargetAllocator.PrometheusCR.PodMonitorSelector = copy.Spec.TargetAllocator.PrometheusCR.PodMonitorSelector
out.Spec.TargetAllocator.PrometheusCR.ServiceMonitorSelector = copy.Spec.TargetAllocator.PrometheusCR.ServiceMonitorSelector
out.Spec.TargetAllocator.SecurityContext = copy.Spec.TargetAllocator.SecurityContext
out.Spec.TargetAllocator.PodSecurityContext = copy.Spec.TargetAllocator.PodSecurityContext
out.Spec.TargetAllocator.TopologySpreadConstraints = copy.Spec.TargetAllocator.TopologySpreadConstraints
out.Spec.TargetAllocator.Tolerations = copy.Spec.TargetAllocator.Tolerations
out.Spec.TargetAllocator.Env = copy.Spec.TargetAllocator.Env
out.Spec.TargetAllocator.Observability = v1alpha1.ObservabilitySpec{
Metrics: v1alpha1.MetricsConfigSpec{
EnableMetrics: copy.Spec.TargetAllocator.Observability.Metrics.EnableMetrics,
},
}
out.Spec.TargetAllocator.PodDisruptionBudget = copy.Spec.TargetAllocator.PodDisruptionBudget
out.Spec.TargetAllocator = TargetAllocatorEmbedded(copy.Spec.TargetAllocator)

out.Spec.Mode = v1alpha2.Mode(copy.Spec.Mode)
out.Spec.UpgradeStrategy = v1alpha2.UpgradeStrategy(copy.Spec.UpgradeStrategy)
@@ -148,3 +126,42 @@ func V1Alpha1to2(in v1alpha1.OpenTelemetryCollector) (v1alpha2.OpenTelemetryColl

return out, nil
}

func TargetAllocatorEmbedded(in v1alpha1.OpenTelemetryTargetAllocator) v1alpha2.TargetAllocatorEmbedded {
out := v1alpha2.TargetAllocatorEmbedded{}
out.Replicas = in.Replicas
out.NodeSelector = in.NodeSelector
out.Resources = in.Resources
out.AllocationStrategy = v1alpha2.TargetAllocatorAllocationStrategy(in.AllocationStrategy)
out.FilterStrategy = v1alpha2.TargetAllocatorFilterStrategy(in.FilterStrategy)
out.ServiceAccount = in.ServiceAccount
out.Image = in.Image
out.Enabled = in.Enabled
out.Affinity = in.Affinity
out.PrometheusCR.Enabled = in.PrometheusCR.Enabled
out.PrometheusCR.ScrapeInterval = in.PrometheusCR.ScrapeInterval
out.SecurityContext = in.SecurityContext
out.PodSecurityContext = in.PodSecurityContext
out.TopologySpreadConstraints = in.TopologySpreadConstraints
out.Tolerations = in.Tolerations
out.Env = in.Env
out.Observability = v1alpha1.ObservabilitySpec{
Metrics: v1alpha1.MetricsConfigSpec{
EnableMetrics: in.Observability.Metrics.EnableMetrics,
},
}

out.PrometheusCR.PodMonitorSelector = &metav1.LabelSelector{
MatchLabels: in.PrometheusCR.PodMonitorSelector,
}
out.PrometheusCR.ServiceMonitorSelector = &metav1.LabelSelector{
MatchLabels: in.PrometheusCR.ServiceMonitorSelector,
}
if in.PodDisruptionBudget != nil {
out.PodDisruptionBudget = &v1alpha2.PodDisruptionBudgetSpec{
MinAvailable: in.PodDisruptionBudget.MinAvailable,
MaxUnavailable: in.PodDisruptionBudget.MaxUnavailable,
}
}
return out
}
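As a hedged usage sketch of the conversion above — written as a test in the same package, since internal/api/convert is not importable from outside the module; the package name, input values, and use of testify are assumptions — the map-based v1alpha1 monitor selectors end up as MatchLabels on a non-nil LabelSelector:

package convert

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1"
)

// Hypothetical sketch, not part of this PR.
func TestTargetAllocatorEmbeddedSelectorSketch(t *testing.T) {
	in := v1alpha1.OpenTelemetryTargetAllocator{
		Enabled: true,
		PrometheusCR: v1alpha1.OpenTelemetryTargetAllocatorPrometheusCR{
			Enabled:            true,
			PodMonitorSelector: map[string]string{"app": "frontend"},
		},
	}

	out := TargetAllocatorEmbedded(in)

	// The v1alpha1 map becomes LabelSelector.MatchLabels ...
	assert.Equal(t, map[string]string{"app": "frontend"}, out.PrometheusCR.PodMonitorSelector.MatchLabels)
	// ... and an unset selector converts to an empty (select-everything) LabelSelector rather than nil.
	assert.NotNil(t, out.PrometheusCR.ServiceMonitorSelector)
	assert.Empty(t, out.PrometheusCR.ServiceMonitorSelector.MatchLabels)
}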