Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft: Add check for watch_namespace before mutating Pod #2666

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -265,6 +265,19 @@ prepare-e2e: chainsaw set-image-controller add-image-targetallocator add-image-o
.PHONY: prepare-e2e-with-featuregates
prepare-e2e-with-featuregates: chainsaw enable-operator-featuregates prepare-e2e

# Patch the operator Deployment (via kustomize) so the manager watches only its
# own namespace plus the fixed test namespace "watch-ns": K8S_NAMESPACE is filled
# from the downward API and WATCH_NAMESPACE is derived from it at runtime.
.PHONY: enable-watch-namespace
enable-watch-namespace: PATCH = [{"op":"add","path":"/spec/template/spec/containers/0/env","value": [ {"name": "K8S_NAMESPACE", "valueFrom": {"fieldRef": { "fieldPath": "metadata.namespace" } } }, {"name": "WATCH_NAMESPACE", "value": "$$(K8S_NAMESPACE),watch-ns"} ] }]
enable-watch-namespace: manifests kustomize
	cd config/manager && $(KUSTOMIZE) edit add patch --kind Deployment --patch '$(PATCH)'

# Deploy everything the watch-namespace e2e suite needs (namespace-restricted operator).
.PHONY: prepare-e2e-watch-namespace
prepare-e2e-watch-namespace: chainsaw enable-watch-namespace prepare-e2e

# end-to-end tests
.PHONY: e2e-watch-namespace
e2e-watch-namespace: chainsaw
	$(CHAINSAW) test --test-dir ./tests/e2e-watch-namespace

.PHONY: scorecard-tests
scorecard-tests: operator-sdk
$(OPERATOR_SDK) scorecard -w=5m bundle || (echo "scorecard test failed" && exit 1)
Expand Down
7 changes: 7 additions & 0 deletions internal/config/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ type Config struct {
openshiftRoutesAvailability openshift.RoutesAvailability
labelsFilter []string
annotationsFilter []string
namespaces []string
}

// New constructs a new configuration based on the given options.
Expand Down Expand Up @@ -101,6 +102,7 @@ func New(opts ...Option) Config {
autoInstrumentationNginxImage: o.autoInstrumentationNginxImage,
labelsFilter: o.labelsFilter,
annotationsFilter: o.annotationsFilter,
namespaces: o.namespaces,
}
}

Expand Down Expand Up @@ -225,3 +227,8 @@ func (c *Config) LabelsFilter() []string {
func (c *Config) AnnotationsFilter() []string {
return c.annotationsFilter
}

// Namespaces returns the namespaces to be watched. A slice containing
// metav1.NamespaceAll (the empty string) means every namespace is watched.
func (c *Config) Namespaces() []string {
	return c.namespaces
}
7 changes: 7 additions & 0 deletions internal/config/options.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ type options struct {
openshiftRoutesAvailability openshift.RoutesAvailability
labelsFilter []string
annotationsFilter []string
namespaces []string
}

func WithAutoDetect(a autodetect.AutoDetect) Option {
Expand Down Expand Up @@ -225,3 +226,9 @@ func WithAnnotationFilters(annotationFilters []string) Option {
o.annotationsFilter = filters
}
}

// WithNamespaces sets the list of namespaces the operator watches
// (typically parsed from the WATCH_NAMESPACE environment variable).
func WithNamespaces(namespaces []string) Option {
	return func(o *options) {
		o.namespaces = namespaces
	}
}
9 changes: 8 additions & 1 deletion internal/webhook/podmutation/webhookhandler.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,11 @@ import (
"context"
"encoding/json"
"net/http"
"slices"

"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
Expand Down Expand Up @@ -89,7 +91,12 @@ func (p *podMutationWebhook) Handle(ctx context.Context, req admission.Request)
return res
}

for _, m := range p.podMutators {
var mutators []PodMutator
// mutate only in case the namespace is marked to be watched
if slices.Contains(p.config.Namespaces(), ns.Name) || slices.Contains(p.config.Namespaces(), metav1.NamespaceAll) {
mutators = p.podMutators
}
for _, m := range mutators {
pod, err = m.Mutate(ctx, ns, pod)
if err != nil {
res := admission.Errored(http.StatusInternalServerError, err)
Expand Down
30 changes: 18 additions & 12 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@ import (
"strings"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should be merged with the group below

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

#2666 (comment)

I'll work on the log improvements in another PR, since this is more or less expected behavior.


routev1 "github.com/openshift/api/route/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/spf13/pflag"
Expand Down Expand Up @@ -195,6 +197,21 @@ func main() {
os.Exit(1)
}

var watchNamespaces []string
var namespaces map[string]cache.Config
watchNamespace, found := os.LookupEnv("WATCH_NAMESPACE")
if found {
setupLog.Info("watching namespace(s)", "namespaces", watchNamespace)
namespaces = map[string]cache.Config{}
watchNamespaces = strings.Split(watchNamespace, ",")
for _, ns := range watchNamespaces {
namespaces[ns] = cache.Config{}
}
} else {
setupLog.Info("the env var WATCH_NAMESPACE isn't set, watching all namespaces")
watchNamespaces = []string{metav1.NamespaceAll}
}

cfg := config.New(
config.WithLogger(ctrl.Log.WithName("config")),
config.WithVersion(v),
Expand All @@ -217,24 +234,13 @@ func main() {
config.WithAutoDetect(ad),
config.WithLabelFilters(labelsFilter),
config.WithAnnotationFilters(annotationsFilter),
config.WithNamespaces(watchNamespaces),
)
err = cfg.AutoDetect()
if err != nil {
setupLog.Error(err, "failed to autodetect config variables")
}

var namespaces map[string]cache.Config
watchNamespace, found := os.LookupEnv("WATCH_NAMESPACE")
if found {
setupLog.Info("watching namespace(s)", "namespaces", watchNamespace)
namespaces = map[string]cache.Config{}
for _, ns := range strings.Split(watchNamespace, ",") {
namespaces[ns] = cache.Config{}
}
} else {
setupLog.Info("the env var WATCH_NAMESPACE isn't set, watching all namespaces")
}

// see https://github.com/openshift/library-go/blob/4362aa519714a4b62b00ab8318197ba2bba51cb7/pkg/config/leaderelection/leaderelection.go#L104
leaseDuration := time.Second * 137
renewDeadline := time.Second * 107
Expand Down
24 changes: 24 additions & 0 deletions tests/e2e-watch-namespace/00-install-collector.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# Collector shared by the watch-ns and not-watch-ns test cases. It lives in the
# operator's namespace so both test namespaces can reach it through its
# cluster-local service.
apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: deployment
  namespace: opentelemetry-operator-system
spec:
  # Minimal pipeline: receive OTLP and write to the collector log via the
  # debug exporter.
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:
    processors:

    exporters:
      debug:

    service:
      pipelines:
        traces:
          receivers: [otlp]
          processors: []
          exporters: [debug]
  mode: deployment
8 changes: 8 additions & 0 deletions tests/e2e-watch-namespace/00-install-instrumentation.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Instrumentation resource referenced by the test Deployment's inject-java
# annotation ("opentelemetry-operator-system/deployment").
apiVersion: opentelemetry.io/v1alpha1
kind: Instrumentation
metadata:
  name: deployment
  namespace: opentelemetry-operator-system
spec:
  exporter:
    # OTLP gRPC endpoint of the shared collector deployed above.
    endpoint: http://deployment-collector.opentelemetry-operator-system:4317
19 changes: 19 additions & 0 deletions tests/e2e-watch-namespace/01-deployment.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Java test workload. The inject-java annotation asks the operator's pod
# webhook for auto-instrumentation; whether injection happens depends on
# whether the pod's namespace is covered by WATCH_NAMESPACE.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-deploy
spec:
  selector:
    matchLabels:
      app: my-deploy
  replicas: 1
  template:
    metadata:
      labels:
        app: my-deploy
      annotations:
        instrumentation.opentelemetry.io/inject-java: "opentelemetry-operator-system/deployment"
    spec:
      containers:
      - name: myapp
        image: ghcr.io/open-telemetry/opentelemetry-operator/e2e-test-app-java:main
14 changes: 14 additions & 0 deletions tests/e2e-watch-namespace/not-watch-ns/01-assert.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# Chainsaw assertion: in a namespace NOT covered by WATCH_NAMESPACE the webhook
# must leave the pod untouched — no init containers, a single app container,
# and no injected env vars. Parenthesized keys are chainsaw JMESPath assertion
# expressions, not literal pod fields.
apiVersion: v1
kind: Pod
metadata:
  annotations:
    instrumentation.opentelemetry.io/inject-java: "opentelemetry-operator-system/deployment"
  labels:
    app: my-deploy
spec:
  (initContainers == null): true
  (length(containers)): 1
  containers:
  - name: myapp
    (env == null): true

35 changes: 35 additions & 0 deletions tests/e2e-watch-namespace/not-watch-ns/chainsaw-test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
# Verifies that pods in a namespace absent from WATCH_NAMESPACE are NOT
# mutated by the operator's pod webhook.
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: not-watch-ns
spec:
  namespace: not-watch-ns
  steps:
  - name: step-00
    skipDelete: true
    try:
    # When this test runs in parallel with watch-ns, both try to create the
    # shared collector ("deployment" already exists); stagger this one to
    # avoid the conflict.
    - sleep:
        duration: 5s

    - apply:
        file: ../00-install-collector.yaml
    - apply:
        file: ../00-install-instrumentation.yaml
    # TODO(review): replace with a proper readiness wait — chainsaw `wait`
    # was reported unreliable here, hence the fixed sleep.
    - sleep:
        duration: 10s
  # Deployment
  - name: step-01
    try:
    - apply:
        file: ../01-deployment.yaml
    - assert:
        timeout: 60s
        file: 01-assert.yaml
    catch:
    - podLogs:
        selector: app=my-deploy
32 changes: 32 additions & 0 deletions tests/e2e-watch-namespace/watch-ns/01-assert.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Chainsaw assertion: in a namespace covered by WATCH_NAMESPACE the webhook
# must inject the Java auto-instrumentation — init container, agent volume
# mount, and OTEL_* env vars. Parenthesized keys are chainsaw JMESPath
# assertion expressions, not literal pod fields.
apiVersion: v1
kind: Pod
metadata:
  annotations:
    instrumentation.opentelemetry.io/inject-java: "opentelemetry-operator-system/deployment"
  labels:
    app: my-deploy
spec:
  initContainers:
  - name: opentelemetry-auto-instrumentation-java
    (volumeMounts[?name == 'opentelemetry-auto-instrumentation-java']):
    - name: opentelemetry-auto-instrumentation-java
      mountPath: /otel-auto-instrumentation-java
  (containers[?name == 'myapp']):
  - name: myapp
    env:
    - name: OTEL_NODE_IP
    - name: OTEL_POD_IP
    - name: JAVA_TOOL_OPTIONS
    - name: OTEL_SERVICE_NAME
      value: my-deploy
    - name: OTEL_EXPORTER_OTLP_ENDPOINT
      value: http://deployment-collector.opentelemetry-operator-system:4317
    - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME
    - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME
    - name: OTEL_RESOURCE_ATTRIBUTES
    (volumeMounts[?name == 'opentelemetry-auto-instrumentation-java']):
    - name: opentelemetry-auto-instrumentation-java
      mountPath: /otel-auto-instrumentation-java
  (volumes[?name == 'opentelemetry-auto-instrumentation-java']):
  - name: opentelemetry-auto-instrumentation-java
    emptyDir: {}
55 changes: 55 additions & 0 deletions tests/e2e-watch-namespace/watch-ns/chainsaw-test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
# Verifies that pods in a namespace covered by WATCH_NAMESPACE ARE mutated
# (Java auto-instrumentation injected) by the operator's pod webhook.
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  creationTimestamp: null
  name: watch-ns
spec:
  namespace: watch-ns
  steps:
  - name: step-00
    skipDelete: true
    try:
    - apply:
        file: ../00-install-collector.yaml
    - apply:
        file: ../00-install-instrumentation.yaml
    # TODO(review): replace with a proper readiness wait — chainsaw `wait`
    # was reported unreliable here, hence the fixed sleep.
    - sleep:
        duration: 10s
    # Earlier attempts at an explicit readiness wait, kept for reference:
    # - command:
    #     entrypoint: kubectl
    #     args:
    #     - wait
    #     - pods
    #     - --for
    #     - condition=Ready="true"
    #     - -l
    #     - app.kubernetes.io/name=deployment-collector
    #     - -n
    #     - opentelemetry-operator-system
    #     - --timeout
    #     - 10s
    ##    - 1m
    #     - -v
    #     - '8'
    # - wait:
    #     resource: pods
    #     selector: 'app.kubernetes.io/name=deployment-collector'
    #     timeout: 10s
    #     namespace: opentelemetry-operator-system
    #     for:
    #       condition:
    #         name: Ready
    #         value: 'true'
  # Deployment
  - name: step-01
    try:
    - apply:
        file: ../01-deployment.yaml
    - assert:
        timeout: 60s
        file: 01-assert.yaml
    catch:
    - podLogs:
        selector: app=my-deploy
Loading