
Commit 1d92baf
Merge branch 'main' into scrape_config_probe_selector
2 parents 51f12aa + bf7cdd1

Some content, including several of the file names below, is hidden by default for large commits.

47 files changed: +1266, -62 lines

.chloggen/3427.yaml (+16)
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
+component: collector
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Create RBAC rules for the k8s_cluster receiver automatically.
+
+# One or more tracking issues related to the change
+issues: [3427]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:

.chloggen/3432.yaml (+16)
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
+component: collector
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add a warning message when one created collector needs extra RBAC permissions and the service account doesn't have them.
+
+# One or more tracking issues related to the change
+issues: [3432]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:

(file name hidden; +21)
@@ -0,0 +1,21 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: 'enhancement'
+
+# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
+component: target allocator
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Added allocation_fallback_strategy option as fallback strategy for per-node allocation strategy, can be enabled with feature flag operator.targetallocator.fallbackstrategy
+
+# One or more tracking issues related to the change
+issues: [3477]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: |
+  If using per-node allocation strategy, targets that are not attached to a node will not
+  be allocated. As the per-node strategy is required when running as a daemonset, it is
+  not possible to assign some targets under a daemonset deployment.
+  Feature flag operator.targetallocator.fallbackstrategy has been added and results in consistent-hashing
+  being used as the fallback allocation strategy for "per-node" only at this time.
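
The gate ID operator.targetallocator.fallbackstrategy is the only detail this entry pins down. Assuming the operator registers its gates through the collector's go.opentelemetry.io/collector/featuregate package (the usual mechanism for gates with this naming style), the declaration would look roughly like the sketch below; the package name, variable name, stage, and description are illustrative, not the operator's actual code.

package gates // hypothetical package name, for illustration only

import "go.opentelemetry.io/collector/featuregate"

// EnableTargetAllocatorFallbackStrategy is an illustrative gate variable; only
// the gate ID comes from the changelog entry above.
var EnableTargetAllocatorFallbackStrategy = featuregate.GlobalRegistry().MustRegister(
	"operator.targetallocator.fallbackstrategy",
	featuregate.StageAlpha,
	featuregate.WithRegisterDescription("Use a fallback allocation strategy when the per-node strategy cannot place a target."),
)

When a gate like this reports IsEnabled(), the reconciler can pass a fallback option to the allocator (see the WithFallbackStrategy sketch after allocator.go below), which matches the "can be enabled with feature flag" wording in the note.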

(file name hidden; +16)
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
+component: auto-instrumentation
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Bump base memory requirements for python and go
+
+# One or more tracking issues related to the change
+issues: [3479]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:

(file name hidden; +16)
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
+component: operator
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Replace references to gcr.io/kubebuilder/kube-rbac-proxy with quay.io/brancz/kube-rbac-proxy
+
+# One or more tracking issues related to the change
+issues: [3485]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:

.github/workflows/publish-autoinstrumentation-nodejs.yaml (+1, -1)
@@ -26,7 +26,7 @@ jobs:
       - uses: actions/checkout@v4

       - name: Read version
-        run: echo VERSION=$(cat autoinstrumentation/nodejs/package.json | jq -r '.dependencies."@opentelemetry/sdk-node"') >> $GITHUB_ENV
+        run: echo VERSION=$(cat autoinstrumentation/nodejs/package.json | jq -r '.dependencies."@opentelemetry/auto-instrumentations-node"') >> $GITHUB_ENV

       - name: Docker meta
         id: meta

Makefile (+1)
@@ -206,6 +206,7 @@ add-rbac-permissions-to-operator: manifests kustomize
 	# This folder is ignored by .gitignore
 	mkdir -p config/rbac/extra-permissions-operator
 	cp -r tests/e2e-automatic-rbac/extra-permissions-operator/* config/rbac/extra-permissions-operator
+	cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/clusterresourcequotas.yaml
 	cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/cronjobs.yaml
 	cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/daemonsets.yaml
 	cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/events.yaml

apis/v1alpha1/instrumentation_webhook.go (+4, -4)
@@ -128,13 +128,13 @@ func (w InstrumentationWebhook) defaulter(r *Instrumentation) error {
 	if r.Spec.Python.Resources.Limits == nil {
 		r.Spec.Python.Resources.Limits = corev1.ResourceList{
 			corev1.ResourceCPU:    resource.MustParse("500m"),
-			corev1.ResourceMemory: resource.MustParse("32Mi"),
+			corev1.ResourceMemory: resource.MustParse("64Mi"),
 		}
 	}
 	if r.Spec.Python.Resources.Requests == nil {
 		r.Spec.Python.Resources.Requests = corev1.ResourceList{
 			corev1.ResourceCPU:    resource.MustParse("50m"),
-			corev1.ResourceMemory: resource.MustParse("32Mi"),
+			corev1.ResourceMemory: resource.MustParse("64Mi"),
 		}
 	}
 	if r.Spec.DotNet.Image == "" {
@@ -158,13 +158,13 @@ func (w InstrumentationWebhook) defaulter(r *Instrumentation) error {
 	if r.Spec.Go.Resources.Limits == nil {
 		r.Spec.Go.Resources.Limits = corev1.ResourceList{
 			corev1.ResourceCPU:    resource.MustParse("500m"),
-			corev1.ResourceMemory: resource.MustParse("32Mi"),
+			corev1.ResourceMemory: resource.MustParse("64Mi"),
 		}
 	}
 	if r.Spec.Go.Resources.Requests == nil {
 		r.Spec.Go.Resources.Requests = corev1.ResourceList{
 			corev1.ResourceCPU:    resource.MustParse("50m"),
-			corev1.ResourceMemory: resource.MustParse("32Mi"),
+			corev1.ResourceMemory: resource.MustParse("64Mi"),
 		}
 	}
 	if r.Spec.ApacheHttpd.Image == "" {

autoinstrumentation/nodejs/package.json (+2, -11)
@@ -14,17 +14,8 @@
     "typescript": "^5.6.3"
   },
   "dependencies": {
-    "@opentelemetry/api": "1.9.0",
-    "@opentelemetry/auto-instrumentations-node": "0.52.0",
+    "@opentelemetry/auto-instrumentations-node": "0.52.1",
     "@opentelemetry/exporter-metrics-otlp-grpc": "0.54.0",
-    "@opentelemetry/exporter-prometheus": "0.54.0",
-    "@opentelemetry/exporter-trace-otlp-grpc": "0.54.0",
-    "@opentelemetry/resource-detector-alibaba-cloud": "0.29.4",
-    "@opentelemetry/resource-detector-aws": "1.7.0",
-    "@opentelemetry/resource-detector-container": "0.5.0",
-    "@opentelemetry/resource-detector-gcp": "0.29.13",
-    "@opentelemetry/resources": "1.27.0",
-    "@opentelemetry/sdk-metrics": "1.27.0",
-    "@opentelemetry/sdk-node": "0.54.0"
+    "@opentelemetry/exporter-prometheus": "0.54.0"
   }
 }

bundle/community/manifests/opentelemetry-operator.clusterserviceversion.yaml (+1, -1)
@@ -514,7 +514,7 @@ spec:
         - --upstream=http://127.0.0.1:8080/
         - --logtostderr=true
         - --v=0
-        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
+        image: quay.io/brancz/kube-rbac-proxy:v0.13.1
         name: kube-rbac-proxy
         ports:
         - containerPort: 8443

bundle/openshift/manifests/opentelemetry-operator.clusterserviceversion.yaml (+1, -1)
@@ -522,7 +522,7 @@ spec:
         - --tls-private-key-file=/var/run/tls/server/tls.key
         - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256
         - --tls-min-version=VersionTLS12
-        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
+        image: quay.io/brancz/kube-rbac-proxy:v0.13.1
         name: kube-rbac-proxy
         ports:
         - containerPort: 8443

cmd/otel-allocator/allocation/allocator.go (+5)
@@ -76,6 +76,11 @@ func (a *allocator) SetFilter(filter Filter) {
 	a.filter = filter
 }

+// SetFallbackStrategy sets the fallback strategy to use.
+func (a *allocator) SetFallbackStrategy(strategy Strategy) {
+	a.strategy.SetFallbackStrategy(strategy)
+}
+
 // SetTargets accepts a list of targets that will be used to make
 // load balancing decisions. This method should be called when there are
 // new targets discovered or existing targets are shutdown.
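
The per-node tests later in this commit build the allocator with New("per-node", loggerPerNode, WithFallbackStrategy(consistentHashingStrategyName)). A minimal sketch of how such an option could forward to the method added above follows; Option, Allocator, and newStrategy are assumed shapes for illustration, only SetFallbackStrategy itself comes from this hunk.

// Sketch only, not code copied from the repository.
type Option func(Allocator)

func WithFallbackStrategy(fallbackStrategy string) Option {
	return func(a Allocator) {
		// newStrategy stands in for whatever name-to-Strategy lookup the
		// allocation package provides; the real wiring may differ.
		if s, err := newStrategy(fallbackStrategy); err == nil {
			a.SetFallbackStrategy(s)
		}
	}
}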

cmd/otel-allocator/allocation/consistent_hashing.go (+2)
@@ -83,3 +83,5 @@ func (s *consistentHashingStrategy) SetCollectors(collectors map[string]*Collect
 	s.consistentHasher = consistent.New(members, s.config)

 }
+
+func (s *consistentHashingStrategy) SetFallbackStrategy(fallbackStrategy Strategy) {}

cmd/otel-allocator/allocation/least_weighted.go (+2)
@@ -54,3 +54,5 @@ func (s *leastWeightedStrategy) GetCollectorForTarget(collectors map[string]*Col
 }

 func (s *leastWeightedStrategy) SetCollectors(_ map[string]*Collector) {}
+
+func (s *leastWeightedStrategy) SetFallbackStrategy(fallbackStrategy Strategy) {}
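
The no-op SetFallbackStrategy methods added to the consistent-hashing and least-weighted strategies suggest the method now belongs to the shared Strategy interface, so every strategy must implement it even though only per-node uses it. A hedged reconstruction of that interface, inferred from the method signatures visible in this commit rather than copied from the repository:

// Fragment of the allocation package, inferred for illustration.
type Strategy interface {
	GetName() string
	GetCollectorForTarget(collectors map[string]*Collector, item *target.Item) (*Collector, error)
	SetCollectors(collectors map[string]*Collector)
	SetFallbackStrategy(fallbackStrategy Strategy)
}

Keeping the method on the interface is what lets the allocator's a.strategy.SetFallbackStrategy(strategy) call above stay strategy-agnostic; strategies with no use for a fallback simply provide the empty implementations shown here.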

cmd/otel-allocator/allocation/per_node.go (+16, -2)
@@ -25,21 +25,31 @@ const perNodeStrategyName = "per-node"
 var _ Strategy = &perNodeStrategy{}

 type perNodeStrategy struct {
-	collectorByNode map[string]*Collector
+	collectorByNode  map[string]*Collector
+	fallbackStrategy Strategy
 }

 func newPerNodeStrategy() Strategy {
 	return &perNodeStrategy{
-		collectorByNode: make(map[string]*Collector),
+		collectorByNode:  make(map[string]*Collector),
+		fallbackStrategy: nil,
 	}
 }

+func (s *perNodeStrategy) SetFallbackStrategy(fallbackStrategy Strategy) {
+	s.fallbackStrategy = fallbackStrategy
+}
+
 func (s *perNodeStrategy) GetName() string {
 	return perNodeStrategyName
 }

 func (s *perNodeStrategy) GetCollectorForTarget(collectors map[string]*Collector, item *target.Item) (*Collector, error) {
 	targetNodeName := item.GetNodeName()
+	if targetNodeName == "" && s.fallbackStrategy != nil {
+		return s.fallbackStrategy.GetCollectorForTarget(collectors, item)
+	}
+
 	collector, ok := s.collectorByNode[targetNodeName]
 	if !ok {
 		return nil, fmt.Errorf("could not find collector for node %s", targetNodeName)
@@ -54,4 +64,8 @@ func (s *perNodeStrategy) SetCollectors(collectors map[string]*Collector) {
 			s.collectorByNode[collector.NodeName] = collector
 		}
 	}
+
+	if s.fallbackStrategy != nil {
+		s.fallbackStrategy.SetCollectors(collectors)
+	}
 }
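
A compact way to read the two hunks above: when a target carries no node name and a fallback is set, allocation is delegated to the fallback, and SetCollectors now also primes the fallback so it sees the same collector set. The sketch below (placed notionally inside the allocation package, since the constructor is unexported) walks that path end to end; the import paths are assumptions about the repository layout, the other identifiers come from the diffs above and the test below.

package allocation

import (
	"github.com/prometheus/prometheus/model/labels"

	"github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/target"
)

// assignNodelessTarget is an illustration, not repository code: before this
// commit a target without node labels made GetCollectorForTarget fail with
// "could not find collector for node"; with a fallback set, the target is
// delegated to the fallback strategy instead.
func assignNodelessTarget(collectors map[string]*Collector, fallback Strategy) (*Collector, error) {
	perNode := newPerNodeStrategy()
	perNode.SetFallbackStrategy(fallback)
	perNode.SetCollectors(collectors) // now also forwarded to the fallback strategy

	// a target with no node-related labels, like thirdTarget in the test below
	item := target.NewItem("sample-job", "10.0.0.1:8080", labels.Labels{{Name: "test", Value: "test3"}}, "")
	return perNode.GetCollectorForTarget(collectors, item)
}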

cmd/otel-allocator/allocation/per_node_test.go (+82, -1)
@@ -26,7 +26,17 @@ import (

 var loggerPerNode = logf.Log.WithName("unit-tests")

-// Tests that two targets with the same target url and job name but different label set are both added.
+func GetTargetsWithNodeName(targets []*target.Item) (targetsWithNodeName []*target.Item) {
+	for _, item := range targets {
+		if item.GetNodeName() != "" {
+			targetsWithNodeName = append(targetsWithNodeName, item)
+		}
+	}
+	return targetsWithNodeName
+}
+
+// Tests that four targets, with one of them lacking node labels, are assigned except for the
+// target that lacks node labels.
 func TestAllocationPerNode(t *testing.T) {
 	// prepare allocator with initial targets and collectors
 	s, _ := New("per-node", loggerPerNode)
@@ -93,6 +103,77 @@
 	}
 }

+// Tests that four targets, with one of them missing node labels, are all assigned.
+func TestAllocationPerNodeUsingFallback(t *testing.T) {
+	// prepare allocator with initial targets and collectors
+	s, _ := New("per-node", loggerPerNode, WithFallbackStrategy(consistentHashingStrategyName))
+
+	cols := MakeNCollectors(4, 0)
+	s.SetCollectors(cols)
+	firstLabels := labels.Labels{
+		{Name: "test", Value: "test1"},
+		{Name: "__meta_kubernetes_pod_node_name", Value: "node-0"},
+	}
+	secondLabels := labels.Labels{
+		{Name: "test", Value: "test2"},
+		{Name: "__meta_kubernetes_node_name", Value: "node-1"},
+	}
+	// no label, should be allocated by the fallback strategy
+	thirdLabels := labels.Labels{
+		{Name: "test", Value: "test3"},
+	}
+	// endpointslice target kind and name
+	fourthLabels := labels.Labels{
+		{Name: "test", Value: "test4"},
+		{Name: "__meta_kubernetes_endpointslice_address_target_kind", Value: "Node"},
+		{Name: "__meta_kubernetes_endpointslice_address_target_name", Value: "node-3"},
+	}
+
+	firstTarget := target.NewItem("sample-name", "0.0.0.0:8000", firstLabels, "")
+	secondTarget := target.NewItem("sample-name", "0.0.0.0:8000", secondLabels, "")
+	thirdTarget := target.NewItem("sample-name", "0.0.0.0:8000", thirdLabels, "")
+	fourthTarget := target.NewItem("sample-name", "0.0.0.0:8000", fourthLabels, "")
+
+	targetList := map[string]*target.Item{
+		firstTarget.Hash():  firstTarget,
+		secondTarget.Hash(): secondTarget,
+		thirdTarget.Hash():  thirdTarget,
+		fourthTarget.Hash(): fourthTarget,
+	}
+
+	// test that targets and collectors are added properly
+	s.SetTargets(targetList)
+
+	// verify length
+	actualItems := s.TargetItems()
+
+	// all targets should be allocated
+	expectedTargetLen := len(targetList)
+	assert.Len(t, actualItems, expectedTargetLen)
+
+	// verify allocation to nodes
+	for targetHash, item := range targetList {
+		actualItem, found := actualItems[targetHash]
+
+		assert.True(t, found, "target with hash %s not found", item.Hash())
+
+		itemsForCollector := s.GetTargetsForCollectorAndJob(actualItem.CollectorName, actualItem.JobName)
+
+		// first two should be assigned one to each collector; if third target, it should be assigned
+		// according to the fallback strategy which may assign it to the otherwise empty collector or
+		// one of the others, depending on the strategy and collector loop order
+		if targetHash == thirdTarget.Hash() {
+			assert.Empty(t, item.GetNodeName())
+			assert.NotZero(t, len(itemsForCollector))
+			continue
+		}
+
+		// Only check targets that have been assigned using the per-node (not fallback) strategy here
+		assert.Len(t, GetTargetsWithNodeName(itemsForCollector), 1)
+		assert.Equal(t, actualItem, GetTargetsWithNodeName(itemsForCollector)[0])
+	}
+}
+
 func TestTargetsWithNoCollectorsPerNode(t *testing.T) {
 	// prepare allocator with initial targets and collectors
 	c, _ := New("per-node", loggerPerNode)
