Commit 5400da8

Enable scrape config and probe support in TA

1 parent 22e8c06 commit 5400da8

3 files changed: +202 −10

cmd/otel-allocator/config/config.go

+5 −1

@@ -61,9 +61,13 @@ type Config struct {
 type PrometheusCRConfig struct {
   Enabled bool `yaml:"enabled,omitempty"`
   PodMonitorSelector *metav1.LabelSelector `yaml:"pod_monitor_selector,omitempty"`
+  PodMonitorNamespaceSelector *metav1.LabelSelector `yaml:"pod_monitor_namespace_selector,omitempty"`
   ServiceMonitorSelector *metav1.LabelSelector `yaml:"service_monitor_selector,omitempty"`
   ServiceMonitorNamespaceSelector *metav1.LabelSelector `yaml:"service_monitor_namespace_selector,omitempty"`
-  PodMonitorNamespaceSelector *metav1.LabelSelector `yaml:"pod_monitor_namespace_selector,omitempty"`
+  ScrapeConfigSelector *metav1.LabelSelector `yaml:"scrape_config_selector,omitempty"`
+  ScrapeConfigNamespaceSelector *metav1.LabelSelector `yaml:"scrape_config_namespace_selector,omitempty"`
+  ProbeSelector *metav1.LabelSelector `yaml:"probe_selector,omitempty"`
+  ProbeNamespaceSelector *metav1.LabelSelector `yaml:"probe_namespace_selector,omitempty"`
   ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
 }
 

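Note (not part of the commit): the four new selector fields mirror the existing pod/service monitor selectors and are exposed through the yaml keys scrape_config_selector, scrape_config_namespace_selector, probe_selector and probe_namespace_selector. The sketch below shows how they could be populated in Go; the struct is trimmed to the new fields, and the label values (app=demo, team=obs) are placeholders, not values used anywhere in this commit.

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// prometheusCRConfig mirrors only the selector fields added above; the real
// struct lives in cmd/otel-allocator/config/config.go.
type prometheusCRConfig struct {
    ScrapeConfigSelector          *metav1.LabelSelector `yaml:"scrape_config_selector,omitempty"`
    ScrapeConfigNamespaceSelector *metav1.LabelSelector `yaml:"scrape_config_namespace_selector,omitempty"`
    ProbeSelector                 *metav1.LabelSelector `yaml:"probe_selector,omitempty"`
    ProbeNamespaceSelector        *metav1.LabelSelector `yaml:"probe_namespace_selector,omitempty"`
}

func main() {
    cfg := prometheusCRConfig{
        // Select only ScrapeConfig CRs labelled app=demo; the label value is a
        // placeholder for this sketch.
        ScrapeConfigSelector: &metav1.LabelSelector{
            MatchLabels: map[string]string{"app": "demo"},
        },
        // Select Probe CRs only from namespaces labelled team=obs; leaving a
        // selector nil keeps the prometheus-operator default behaviour.
        ProbeNamespaceSelector: &metav1.LabelSelector{
            MatchLabels: map[string]string{"team": "obs"},
        },
    }
    fmt.Printf("%+v\n", cfg)
}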
cmd/otel-allocator/watcher/promOperator.go

+36 −5

@@ -79,10 +79,14 @@ func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocat
     Spec: monitoringv1.PrometheusSpec{
       CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
         ScrapeInterval: monitoringv1.Duration(cfg.PrometheusCR.ScrapeInterval.String()),
-        ServiceMonitorSelector: cfg.PrometheusCR.ServiceMonitorSelector,
         PodMonitorSelector: cfg.PrometheusCR.PodMonitorSelector,
-        ServiceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
         PodMonitorNamespaceSelector: cfg.PrometheusCR.PodMonitorNamespaceSelector,
+        ServiceMonitorSelector: cfg.PrometheusCR.ServiceMonitorSelector,
+        ServiceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
+        ScrapeConfigSelector: cfg.PrometheusCR.ScrapeConfigSelector,
+        ScrapeConfigNamespaceSelector: cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
+        ProbeSelector: cfg.PrometheusCR.ProbeSelector,
+        ProbeNamespaceSelector: cfg.PrometheusCR.ProbeNamespaceSelector,
         ServiceDiscoveryRole: &serviceDiscoveryRole,
       },
     },
@@ -133,6 +137,8 @@ func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocat
     kubeConfigPath: cfg.KubeConfigFilePath,
     podMonitorNamespaceSelector: cfg.PrometheusCR.PodMonitorNamespaceSelector,
     serviceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
+    scrapeConfigNamespaceSelector: cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
+    probeNamespaceSelector: cfg.PrometheusCR.ProbeNamespaceSelector,
     resourceSelector: resourceSelector,
     store: store,
   }, nil
@@ -150,12 +156,13 @@ type PrometheusCRWatcher struct {
   kubeConfigPath string
   podMonitorNamespaceSelector *metav1.LabelSelector
   serviceMonitorNamespaceSelector *metav1.LabelSelector
+  scrapeConfigNamespaceSelector *metav1.LabelSelector
+  probeNamespaceSelector *metav1.LabelSelector
   resourceSelector *prometheus.ResourceSelector
   store *assets.StoreBuilder
 }
 
 func getNamespaceInformer(ctx context.Context, allowList map[string]struct{}, promOperatorLogger log.Logger, clientset kubernetes.Interface, operatorMetrics *operator.Metrics) (cache.SharedIndexInformer, error) {
-
   kubernetesVersion, err := clientset.Discovery().ServerVersion()
   if err != nil {
     return nil, err
@@ -196,9 +203,21 @@ func getInformers(factory informers.FactoriesForNamespaces) (map[string]*informe
     return nil, err
   }
 
+  probeInformers, err := informers.NewInformersForResource(factory, monitoringv1.SchemeGroupVersion.WithResource(monitoringv1.ProbeName))
+  if err != nil {
+    return nil, err
+  }
+
+  scrapeConfigInformers, err := informers.NewInformersForResource(factory, promv1alpha1.SchemeGroupVersion.WithResource(promv1alpha1.ScrapeConfigName))
+  if err != nil {
+    return nil, err
+  }
+
   return map[string]*informers.ForResource{
     monitoringv1.ServiceMonitorName: serviceMonitorInformers,
     monitoringv1.PodMonitorName: podMonitorInformers,
+    monitoringv1.ProbeName: probeInformers,
+    promv1alpha1.ScrapeConfigName: scrapeConfigInformers,
   }, nil
 }
 
@@ -228,6 +247,8 @@ func (w *PrometheusCRWatcher) Watch(upstreamEvents chan Event, upstreamErrors ch
       for name, selector := range map[string]*metav1.LabelSelector{
         "PodMonitorNamespaceSelector": w.podMonitorNamespaceSelector,
         "ServiceMonitorNamespaceSelector": w.serviceMonitorNamespaceSelector,
+        "ProbeNamespaceSelector": w.probeNamespaceSelector,
+        "ScrapeConfigNamespaceSelector": w.scrapeConfigNamespaceSelector,
       } {
         sync, err := k8sutil.LabelSelectionHasChanged(old.Labels, cur.Labels, selector)
         if err != nil {
@@ -342,6 +363,16 @@ func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Confi
     return nil, err
   }
 
+  probeInstances, err := w.resourceSelector.SelectProbes(ctx, w.informers[monitoringv1.ProbeName].ListAllByNamespace)
+  if err != nil {
+    return nil, err
+  }
+
+  scrapeConfigInstances, err := w.resourceSelector.SelectScrapeConfigs(ctx, w.informers[promv1alpha1.ScrapeConfigName].ListAllByNamespace)
+  if err != nil {
+    return nil, err
+  }
+
   generatedConfig, err := w.configGenerator.GenerateServerConfiguration(
     "30s",
     "",
@@ -352,8 +383,8 @@ func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Confi
     nil,
     serviceMonitorInstances,
     podMonitorInstances,
-    map[string]*monitoringv1.Probe{},
-    map[string]*promv1alpha1.ScrapeConfig{},
+    probeInstances,
+    scrapeConfigInstances,
     w.store,
     nil,
     nil,

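Usage sketch (not part of the commit): after the LoadConfig change above, the generated Prometheus configuration carries scrape jobs derived from Probe and ScrapeConfig CRs in addition to the ServiceMonitor/PodMonitor jobs. The helper below is a hypothetical function assumed to sit in the watcher package so it can use PrometheusCRWatcher directly; it only lists the job names LoadConfig produced. Assumes imports "context" and "github.com/go-logr/logr".

// logPrometheusCRJobs regenerates the configuration via the watcher and logs
// every scrape job it produced, which after this change also includes jobs
// derived from Probe and ScrapeConfig custom resources.
func logPrometheusCRJobs(ctx context.Context, logger logr.Logger, w *PrometheusCRWatcher) error {
    promCfg, err := w.LoadConfig(ctx)
    if err != nil {
        return err
    }
    for _, sc := range promCfg.ScrapeConfigs {
        logger.Info("discovered scrape job", "job", sc.JobName)
    }
    return nil
}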
cmd/otel-allocator/watcher/promOperator_test.go

+161 −4

@@ -24,6 +24,7 @@ import (
   "github.com/go-kit/log"
   "github.com/go-kit/log/level"
   monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+  promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
   "github.com/prometheus-operator/prometheus-operator/pkg/assets"
   fakemonitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake"
   "github.com/prometheus-operator/prometheus-operator/pkg/informers"
@@ -35,6 +36,7 @@ import (
   promconfig "github.com/prometheus/prometheus/config"
   "github.com/prometheus/prometheus/discovery"
   kubeDiscovery "github.com/prometheus/prometheus/discovery/kubernetes"
+  "github.com/prometheus/prometheus/discovery/targetgroup"
   "github.com/stretchr/testify/assert"
   "github.com/stretchr/testify/require"
   v1 "k8s.io/api/core/v1"
@@ -59,6 +61,8 @@ func TestLoadConfig(t *testing.T) {
     name string
     serviceMonitors []*monitoringv1.ServiceMonitor
     podMonitors []*monitoringv1.PodMonitor
+    scrapeConfigs []*promv1alpha1.ScrapeConfig
+    probes []*monitoringv1.Probe
     want *promconfig.Config
     wantErr bool
     cfg allocatorconfig.Config
@@ -662,6 +666,136 @@
         },
       },
     },
+    {
+      name: "scrape configs selector test",
+      scrapeConfigs: []*promv1alpha1.ScrapeConfig{
+        {
+          ObjectMeta: metav1.ObjectMeta{
+            Name: "scrapeconfig-test-1",
+            Namespace: "test",
+            Labels: map[string]string{
+              "testpod": "testpod",
+            },
+          },
+          Spec: promv1alpha1.ScrapeConfigSpec{
+            JobName: func() *string {
+              j := "scrapeConfig/test/scrapeconfig-test-1"
+              return &j
+            }(),
+            StaticConfigs: []promv1alpha1.StaticConfig{
+              {
+                Targets: []promv1alpha1.Target{"127.0.0.1:8888"},
+                Labels: nil,
+              },
+            },
+          },
+        },
+      },
+      cfg: allocatorconfig.Config{
+        PrometheusCR: allocatorconfig.PrometheusCRConfig{
+          ScrapeConfigSelector: &metav1.LabelSelector{
+            MatchLabels: map[string]string{
+              "testpod": "testpod",
+            },
+          },
+        },
+      },
+      want: &promconfig.Config{
+        ScrapeConfigs: []*promconfig.ScrapeConfig{
+          {
+            JobName: "scrapeConfig/test/scrapeconfig-test-1",
+            ScrapeInterval: model.Duration(30 * time.Second),
+            ScrapeProtocols: defaultScrapeProtocols,
+            ScrapeTimeout: model.Duration(10 * time.Second),
+            HonorTimestamps: true,
+            HonorLabels: false,
+            Scheme: "http",
+            MetricsPath: "/metrics",
+            ServiceDiscoveryConfigs: []discovery.Config{
+              discovery.StaticConfig{
+                &targetgroup.Group{
+                  Targets: []model.LabelSet{
+                    map[model.LabelName]model.LabelValue{
+                      "__address__": "127.0.0.1:8888",
+                    },
+                  },
+                  Labels: map[model.LabelName]model.LabelValue{},
+                  Source: "0",
+                },
+              },
+            },
+            HTTPClientConfig: config.DefaultHTTPClientConfig,
+            EnableCompression: true,
+          },
+        },
+      },
+    },
+    {
+      name: "probe selector test",
+      probes: []*monitoringv1.Probe{
+        {
+          ObjectMeta: metav1.ObjectMeta{
+            Name: "probe-test-1",
+            Namespace: "test",
+            Labels: map[string]string{
+              "testpod": "testpod",
+            },
+          },
+          Spec: monitoringv1.ProbeSpec{
+            JobName: "probe/test/probe-1/0",
+            ProberSpec: monitoringv1.ProberSpec{
+              URL: "localhost:50671",
+              Path: "/metrics",
+            },
+            Targets: monitoringv1.ProbeTargets{
+              StaticConfig: &monitoringv1.ProbeTargetStaticConfig{
+                Targets: []string{"prometheus.io"},
+              },
+            },
+          },
+        },
+      },
+      cfg: allocatorconfig.Config{
+        PrometheusCR: allocatorconfig.PrometheusCRConfig{
+          ProbeSelector: &metav1.LabelSelector{
+            MatchLabels: map[string]string{
+              "testpod": "testpod",
+            },
+          },
+        },
+      },
+      want: &promconfig.Config{
+        ScrapeConfigs: []*promconfig.ScrapeConfig{
+          {
+            JobName: "probe/test/probe-test-1",
+            ScrapeInterval: model.Duration(30 * time.Second),
+            ScrapeProtocols: defaultScrapeProtocols,
+            ScrapeTimeout: model.Duration(10 * time.Second),
+            HonorTimestamps: true,
+            HonorLabels: false,
+            Scheme: "http",
+            MetricsPath: "/metrics",
+            ServiceDiscoveryConfigs: []discovery.Config{
+              discovery.StaticConfig{
+                &targetgroup.Group{
+                  Targets: []model.LabelSet{
+                    map[model.LabelName]model.LabelValue{
+                      "__address__": "prometheus.io",
+                    },
+                  },
+                  Labels: map[model.LabelName]model.LabelValue{
+                    "namespace": "test",
+                  },
+                  Source: "0",
+                },
+              },
+            },
+            HTTPClientConfig: config.DefaultHTTPClientConfig,
+            EnableCompression: true,
+          },
+        },
+      },
+    },
     {
       name: "service monitor namespace selector test",
       serviceMonitors: []*monitoringv1.ServiceMonitor{
@@ -805,7 +939,7 @@
   }
   for _, tt := range tests {
     t.Run(tt.name, func(t *testing.T) {
-      w, _ := getTestPrometheusCRWatcher(t, tt.serviceMonitors, tt.podMonitors, tt.cfg)
+      w, _ := getTestPrometheusCRWatcher(t, tt.serviceMonitors, tt.podMonitors, tt.probes, tt.scrapeConfigs, tt.cfg)
 
       // Start namespace informers in order to populate cache.
       go w.nsInformer.Run(w.stopChannel)
@@ -910,7 +1044,7 @@ func TestNamespaceLabelUpdate(t *testing.T) {
     ScrapeConfigs: []*promconfig.ScrapeConfig{},
   }
 
-  w, source := getTestPrometheusCRWatcher(t, nil, podMonitors, cfg)
+  w, source := getTestPrometheusCRWatcher(t, nil, podMonitors, nil, nil, cfg)
   events := make(chan Event, 1)
   eventInterval := 5 * time.Millisecond
 
@@ -976,7 +1110,7 @@ func TestRateLimit(t *testing.T) {
   eventInterval := 500 * time.Millisecond
   cfg := allocatorconfig.Config{}
 
-  w, _ := getTestPrometheusCRWatcher(t, nil, nil, cfg)
+  w, _ := getTestPrometheusCRWatcher(t, nil, nil, nil, nil, cfg)
   defer w.Close()
   w.eventInterval = eventInterval
 
@@ -1037,7 +1171,7 @@ func TestRateLimit(t *testing.T) {
 
 // getTestPrometheusCRWatcher creates a test instance of PrometheusCRWatcher with fake clients
 // and test secrets.
-func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.ServiceMonitor, podMonitors []*monitoringv1.PodMonitor, cfg allocatorconfig.Config) (*PrometheusCRWatcher, *fcache.FakeControllerSource) {
+func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.ServiceMonitor, podMonitors []*monitoringv1.PodMonitor, probes []*monitoringv1.Probe, scrapeConfigs []*promv1alpha1.ScrapeConfig, cfg allocatorconfig.Config) (*PrometheusCRWatcher, *fcache.FakeControllerSource) {
   mClient := fakemonitoringclient.NewSimpleClientset()
   for _, sm := range svcMonitors {
     if sm != nil {
@@ -1055,6 +1189,23 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic
       }
     }
   }
+  for _, prb := range probes {
+    if prb != nil {
+      _, err := mClient.MonitoringV1().Probes(prb.Namespace).Create(context.Background(), prb, metav1.CreateOptions{})
+      if err != nil {
+        t.Fatal(t, err)
+      }
+    }
+  }
+
+  for _, scc := range scrapeConfigs {
+    if scc != nil {
+      _, err := mClient.MonitoringV1alpha1().ScrapeConfigs(scc.Namespace).Create(context.Background(), scc, metav1.CreateOptions{})
+      if err != nil {
+        t.Fatal(t, err)
+      }
+    }
+  }
 
   k8sClient := fake.NewSimpleClientset()
   _, err := k8sClient.CoreV1().Secrets("test").Create(context.Background(), &v1.Secret{
@@ -1094,6 +1245,10 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic
       PodMonitorSelector: cfg.PrometheusCR.PodMonitorSelector,
       ServiceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
       PodMonitorNamespaceSelector: cfg.PrometheusCR.PodMonitorNamespaceSelector,
+      ProbeSelector: cfg.PrometheusCR.ProbeSelector,
+      ProbeNamespaceSelector: cfg.PrometheusCR.ProbeNamespaceSelector,
+      ScrapeConfigSelector: cfg.PrometheusCR.ScrapeConfigSelector,
+      ScrapeConfigNamespaceSelector: cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
       ServiceDiscoveryRole: &serviceDiscoveryRole,
     },
   },
@@ -1136,6 +1291,8 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic
     configGenerator: generator,
     podMonitorNamespaceSelector: cfg.PrometheusCR.PodMonitorNamespaceSelector,
     serviceMonitorNamespaceSelector: cfg.PrometheusCR.ServiceMonitorNamespaceSelector,
+    probeNamespaceSelector: cfg.PrometheusCR.ProbeNamespaceSelector,
+    scrapeConfigNamespaceSelector: cfg.PrometheusCR.ScrapeConfigNamespaceSelector,
    resourceSelector: resourceSelector,
    store: store,
  }, source

0 commit comments