From a3ea3a1bd80799dc6b10fef6742fff22f4e6ccb4 Mon Sep 17 00:00:00 2001 From: Andrii Chubatiuk Date: Sat, 26 Jul 2025 21:59:10 +0300 Subject: [PATCH 1/2] vlsingle, vmsingle: do not mount emptyDir if data volume is present in --- api/operator/v1/vlagent_types.go | 4 +- config/crd/overlay/crd.yaml | 4 +- docs/CHANGELOG.md | 1 + docs/api.md | 4 +- .../operator/factory/build/container.go | 44 +++++ .../operator/factory/build/container_test.go | 176 +++++++++++++++++- .../operator/factory/vlsingle/vlogs.go | 8 +- .../operator/factory/vlsingle/vlsingle.go | 49 ++--- .../operator/factory/vmsingle/vmsingle.go | 86 ++------- .../operator/factory/vtsingle/vtsingle.go | 48 ++--- test/e2e/vlsingle_test.go | 5 +- test/e2e/vmsingle_test.go | 36 +++- test/e2e/vtsingle_test.go | 5 +- 13 files changed, 308 insertions(+), 162 deletions(-) diff --git a/api/operator/v1/vlagent_types.go b/api/operator/v1/vlagent_types.go index 65a465258..9c626f85e 100644 --- a/api/operator/v1/vlagent_types.go +++ b/api/operator/v1/vlagent_types.go @@ -55,10 +55,10 @@ type VLAgentSpec struct { // PodDisruptionBudget created by operator // +optional PodDisruptionBudget *vmv1beta1.EmbeddedPodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty"` - // StatefulStorage configures storage for StatefulSet + // Storage configures storage for StatefulSet // +optional Storage *vmv1beta1.StorageSpec `json:"storage,omitempty"` - // StatefulRollingUpdateStrategy allows configuration for strategyType + // RollingUpdateStrategy allows configuration for strategyType // set it to RollingUpdate for disabling operator statefulSet rollingUpdate // +optional RollingUpdateStrategy appsv1.StatefulSetUpdateStrategyType `json:"rollingUpdateStrategy,omitempty"` diff --git a/config/crd/overlay/crd.yaml b/config/crd/overlay/crd.yaml index 6dbed5b78..fedc4408e 100644 --- a/config/crd/overlay/crd.yaml +++ b/config/crd/overlay/crd.yaml @@ -1267,7 +1267,7 @@ spec: type: integer rollingUpdateStrategy: description: |- - StatefulRollingUpdateStrategy allows configuration for strategyType + RollingUpdateStrategy allows configuration for strategyType set it to RollingUpdate for disabling operator statefulSet rollingUpdate type: string runtimeClassName: @@ -1357,7 +1357,7 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true storage: - description: StatefulStorage configures storage for StatefulSet + description: Storage configures storage for StatefulSet properties: disableMountSubPath: description: |- diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index bfe4b09c5..50dbb23cf 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -24,6 +24,7 @@ SECURITY: upgrade Go builder from Go1.25.4 to Go1.25.5. See [the list of issues * BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): remove orphaned ServiceAccount and RBAC resources. See [#1665](https://github.com/VictoriaMetrics/operator/issues/1665). * BUGFIX: [vmanomaly](https://docs.victoriametrics.com/operator/resources/vmanomaly/): properly handle configuration which is missing `reader.queries` in either `configRawYaml` or `configSecret`. Previously, it would lead to panic. * BUGFIX: [vmanomaly](https://docs.victoriametrics.com/operator/resources/vmanomaly/): fix configuration parsing when running in [UI mode](https://docs.victoriametrics.com/anomaly-detection/ui/). Previously, configuration required to use `preset: ui:version` instead of `preset: ui`. 
+* BUGFIX: [vmsingle](https://docs.victoriametrics.com/operator/resources/vmsingle/), [vlsingle](https://docs.victoriametrics.com/operator/resources/vlsingle/) and [vmalertmanager](https://docs.victoriametrics.com/operator/resources/vmalertmanager): do not mount an emptyDir volume if the storage data volume is already present in the volumes list. Previously, it was impossible to mount an external PVC without overriding the default storageDataPath via `spec.extraArgs` and without an unneeded emptyDir listed among the pod volumes. Related issue: [#1477](https://github.com/VictoriaMetrics/operator/issues/1477). ## [v0.66.0](https://github.com/VictoriaMetrics/operator/releases/tag/v0.66.0) diff --git a/docs/api.md b/docs/api.md index 8e2044542..fc48a3104 100644 --- a/docs/api.md +++ b/docs/api.md @@ -253,7 +253,7 @@ Appears in: [VLAgent](#vlagent) | replicaCount#
_integer_ | _(Optional)_
ReplicaCount is the expected size of the Application. | | resources#
_[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core)_ | _(Optional)_
Resources container resource request and limits, https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
if not defined default resources from operator config will be used | | revisionHistoryLimitCount#
_integer_ | _(Optional)_
The number of old ReplicaSets to retain to allow rollback in deployment or
maximum number of revisions that will be maintained in the Deployment revision history.
Has no effect at StatefulSets
Defaults to 10. | -| rollingUpdateStrategy#
_[StatefulSetUpdateStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetupdatestrategytype-v1-apps)_ | _(Optional)_
StatefulRollingUpdateStrategy allows configuration for strategyType
set it to RollingUpdate for disabling operator statefulSet rollingUpdate | +| rollingUpdateStrategy#
_[StatefulSetUpdateStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetupdatestrategytype-v1-apps)_ | _(Optional)_
RollingUpdateStrategy allows configuration for strategyType
set it to RollingUpdate for disabling operator statefulSet rollingUpdate | | runtimeClassName#
_string_ | _(Optional)_
RuntimeClassName - defines runtime class for kubernetes pod.
https://kubernetes.io/docs/concepts/containers/runtime-class/ | | schedulerName#
_string_ | _(Optional)_
SchedulerName - defines kubernetes scheduler name | | secrets#
_string array_ | _(Optional)_
Secrets is a list of Secrets in the same namespace as the Application
object, which shall be mounted into the Application container
at /etc/vm/secrets/SECRET_NAME folder | @@ -261,7 +261,7 @@ Appears in: [VLAgent](#vlagent) | serviceAccountName#
_string_ | _(Optional)_
ServiceAccountName is the name of the ServiceAccount to use to run the pods | | serviceScrapeSpec#
_[VMServiceScrapeSpec](#vmservicescrapespec)_ | _(Optional)_
ServiceScrapeSpec that will be added to vlagent VMServiceScrape spec | | serviceSpec#
_[AdditionalServiceSpec](#additionalservicespec)_ | _(Optional)_
ServiceSpec that will be added to vlagent service spec | -| storage#
_[StorageSpec](#storagespec)_ | _(Optional)_
StatefulStorage configures storage for StatefulSet | +| storage#
_[StorageSpec](#storagespec)_ | _(Optional)_
Storage configures storage for StatefulSet | | syslogSpec#
_[SyslogServerSpec](#syslogserverspec)_ | _(Optional)_
SyslogSpec defines syslog listener configuration | | terminationGracePeriodSeconds#
_integer_ | _(Optional)_
TerminationGracePeriodSeconds period for container graceful termination | | tolerations#
_[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#toleration-v1-core) array_ | _(Optional)_
Tolerations If specified, the pod's tolerations. | diff --git a/internal/controller/operator/factory/build/container.go b/internal/controller/operator/factory/build/container.go index edc7e4da5..93cdc326b 100644 --- a/internal/controller/operator/factory/build/container.go +++ b/internal/controller/operator/factory/build/container.go @@ -535,3 +535,47 @@ func AddSyslogTLSConfigToVolumes(dstVolumes []corev1.Volume, dstMounts []corev1. } return dstVolumes, dstMounts } + +func StorageVolumeMountsTo(volumes []corev1.Volume, mounts []corev1.VolumeMount, pvcSrc *corev1.PersistentVolumeClaimVolumeSource, volumeName, storagePath string) ([]corev1.Volume, []corev1.VolumeMount) { + var alreadyMounted bool + for _, volumeMount := range mounts { + if volumeMount.Name == volumeName { + alreadyMounted = true + break + } + } + if !alreadyMounted { + mounts = append(mounts, corev1.VolumeMount{ + Name: volumeName, + MountPath: storagePath, + }) + } + if pvcSrc != nil { + volumes = append(volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: pvcSrc, + }, + }) + return volumes, mounts + } + + var volumePresent bool + for _, volume := range volumes { + if volume.Name == volumeName { + volumePresent = true + break + } + } + if volumePresent { + return volumes, mounts + } + + volumes = append(volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }) + return volumes, mounts +} diff --git a/internal/controller/operator/factory/build/container_test.go b/internal/controller/operator/factory/build/container_test.go index 9f26d8c12..32e5ce653 100644 --- a/internal/controller/operator/factory/build/container_test.go +++ b/internal/controller/operator/factory/build/container_test.go @@ -45,7 +45,6 @@ func Test_buildProbe(t *testing.T) { cr testBuildProbeCR validate func(corev1.Container) error } - f := func(o opts) { t.Helper() got := Probe(o.container, o.cr) @@ -349,5 +348,180 @@ func TestAddSyslogArgsTo(t *testing.T) { "-syslog.compressMethod.udp=zstd", } f(&spec, expected) +} + +func TestStorageVolumeMountsTo(t *testing.T) { + type opts struct { + pvcSrc *corev1.PersistentVolumeClaimVolumeSource + volumeName string + storagePath string + volumes []corev1.Volume + expectedVolumes []corev1.Volume + mounts []corev1.VolumeMount + expectedMounts []corev1.VolumeMount + } + f := func(o opts) { + t.Helper() + gotVolumes, gotMounts := StorageVolumeMountsTo(o.volumes, o.mounts, o.pvcSrc, o.volumeName, o.storagePath) + assert.Equal(t, o.expectedMounts, gotMounts) + assert.Equal(t, o.expectedVolumes, gotVolumes) + } + + // no PVC spec and no volumes and mounts + f(opts{ + volumeName: "test", + storagePath: "/test", + expectedVolumes: []corev1.Volume{{ + Name: "test", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }}, + expectedMounts: []corev1.VolumeMount{{ + Name: "test", + MountPath: "/test", + }}, + }) + + // with PVC spec and no volumes and mounts + f(opts{ + volumeName: "test", + storagePath: "/test", + pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", + }, + expectedVolumes: []corev1.Volume{{ + Name: "test", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", + }, + }, + }}, + expectedMounts: []corev1.VolumeMount{{ + Name: "test", + MountPath: "/test", + }}, + }) + + // with PVC spec and matching data volume + f(opts{ + volumes: []corev1.Volume{{ + 
Name: "test", + VolumeSource: corev1.VolumeSource{ + AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws-volume", + }, + }, + }}, + volumeName: "test", + storagePath: "/test", + pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", + }, + expectedVolumes: []corev1.Volume{ + { + Name: "test", + VolumeSource: corev1.VolumeSource{ + AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws-volume", + }, + }, + }, + { + Name: "test", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", + }, + }, + }, + }, + expectedMounts: []corev1.VolumeMount{{ + Name: "test", + MountPath: "/test", + }}, + }) + // with PVC spec and not matching data volume + f(opts{ + volumes: []corev1.Volume{{ + Name: "extra", + VolumeSource: corev1.VolumeSource{ + AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws-volume", + }, + }, + }}, + volumeName: "test", + storagePath: "/test", + pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", + }, + expectedVolumes: []corev1.Volume{ + { + Name: "extra", + VolumeSource: corev1.VolumeSource{ + AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws-volume", + }, + }, + }, + { + Name: "test", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", + }, + }, + }, + }, + expectedMounts: []corev1.VolumeMount{{ + Name: "test", + MountPath: "/test", + }}, + }) + + // with PVC spec and existing data volume mount + f(opts{ + volumes: []corev1.Volume{{ + Name: "extra", + VolumeSource: corev1.VolumeSource{ + AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws-volume", + }, + }, + }}, + mounts: []corev1.VolumeMount{{ + Name: "test", + MountPath: "/other-path", + }}, + volumeName: "test", + storagePath: "/test", + pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", + }, + expectedVolumes: []corev1.Volume{ + { + Name: "extra", + VolumeSource: corev1.VolumeSource{ + AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws-volume", + }, + }, + }, + { + Name: "test", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", + }, + }, + }, + }, + expectedMounts: []corev1.VolumeMount{{ + Name: "test", + MountPath: "/other-path", + }}, + }) } diff --git a/internal/controller/operator/factory/vlsingle/vlogs.go b/internal/controller/operator/factory/vlsingle/vlogs.go index da5c887b5..61db98bac 100644 --- a/internal/controller/operator/factory/vlsingle/vlogs.go +++ b/internal/controller/operator/factory/vlsingle/vlogs.go @@ -150,7 +150,7 @@ func makeVLogsPodSpec(r *vmv1beta1.VLogs) (*corev1.PodTemplateSpec, error) { // if customStorageDataPath is not empty, do not add pvc. 
shouldAddPVC := r.Spec.StorageDataPath == "" - storagePath := vlsingleDataDir + storagePath := dataDataDir if r.Spec.StorageDataPath != "" { storagePath = r.Spec.StorageDataPath } @@ -186,14 +186,14 @@ func makeVLogsPodSpec(r *vmv1beta1.VLogs) (*corev1.PodTemplateSpec, error) { if storageSpec == nil { volumes = append(volumes, corev1.Volume{ - Name: vlsingleDataVolumeName, + Name: dataVolumeName, VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{}, }, }) } else if shouldAddPVC { volumes = append(volumes, corev1.Volume{ - Name: vlsingleDataVolumeName, + Name: dataVolumeName, VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: r.PrefixedName(), @@ -204,7 +204,7 @@ func makeVLogsPodSpec(r *vmv1beta1.VLogs) (*corev1.PodTemplateSpec, error) { volumes = append(volumes, r.Spec.Volumes...) vmMounts := []corev1.VolumeMount{ { - Name: vlsingleDataVolumeName, + Name: dataVolumeName, MountPath: storagePath, }, } diff --git a/internal/controller/operator/factory/vlsingle/vlsingle.go b/internal/controller/operator/factory/vlsingle/vlsingle.go index 7620d2f8c..4ad8e793b 100644 --- a/internal/controller/operator/factory/vlsingle/vlsingle.go +++ b/internal/controller/operator/factory/vlsingle/vlsingle.go @@ -24,8 +24,8 @@ import ( ) const ( - vlsingleDataDir = "/victoria-logs-data" - vlsingleDataVolumeName = "data" + dataDataDir = "/victoria-logs-data" + dataVolumeName = "data" tlsServerConfigMountPath = "/etc/vm/tls-server-secrets" ) @@ -69,7 +69,7 @@ func CreateOrUpdate(ctx context.Context, rclient client.Client, cr *vmv1.VLSingl return err } } - if cr.Spec.Storage != nil && cr.Spec.StorageDataPath == "" { + if cr.Spec.Storage != nil { if err := createOrUpdatePVC(ctx, rclient, cr, prevCR); err != nil { return err } @@ -156,10 +156,7 @@ func makePodSpec(r *vmv1.VLSingle) (*corev1.PodTemplateSpec, error) { args = append(args, fmt.Sprintf("-retention.maxDiskSpaceUsageBytes=%s", r.Spec.RetentionMaxDiskSpaceUsageBytes)) } - // if customStorageDataPath is not empty, do not add pvc. - shouldAddPVC := r.Spec.StorageDataPath == "" - - storagePath := vlsingleDataDir + storagePath := dataDataDir if r.Spec.StorageDataPath != "" { storagePath = r.Spec.StorageDataPath } @@ -193,37 +190,19 @@ func makePodSpec(r *vmv1.VLSingle) (*corev1.PodTemplateSpec, error) { var ports []corev1.ContainerPort ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(r.Spec.Port).IntVal}) - volumes := []corev1.Volume{} - - storageSpec := r.Spec.Storage - - if storageSpec == nil { - volumes = append(volumes, corev1.Volume{ - Name: vlsingleDataVolumeName, - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }) - } else if shouldAddPVC { - volumes = append(volumes, corev1.Volume{ - Name: vlsingleDataVolumeName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: r.PrefixedName(), - }, - }, - }) - } + var volumes []corev1.Volume + var vmMounts []corev1.VolumeMount volumes = append(volumes, r.Spec.Volumes...) - vmMounts := []corev1.VolumeMount{ - { - Name: vlsingleDataVolumeName, - MountPath: storagePath, - }, - } - vmMounts = append(vmMounts, r.Spec.VolumeMounts...) 
+ var pvcSrc *corev1.PersistentVolumeClaimVolumeSource + if r.Spec.Storage != nil { + pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: r.PrefixedName(), + } + } + volumes, vmMounts = build.StorageVolumeMountsTo(volumes, vmMounts, pvcSrc, dataVolumeName, storagePath) + for _, s := range r.Spec.Secrets { volumes = append(volumes, corev1.Volume{ Name: k8stools.SanitizeVolumeName("secret-" + s), diff --git a/internal/controller/operator/factory/vmsingle/vmsingle.go b/internal/controller/operator/factory/vmsingle/vmsingle.go index 3c700d513..d7e104b93 100644 --- a/internal/controller/operator/factory/vmsingle/vmsingle.go +++ b/internal/controller/operator/factory/vmsingle/vmsingle.go @@ -24,8 +24,8 @@ import ( ) const ( - vmSingleDataDir = "/victoria-metrics-data" - vmDataVolumeName = "data" + dataDataDir = "/victoria-metrics-data" + dataVolumeName = "data" streamAggrSecretKey = "config.yaml" ) @@ -83,7 +83,7 @@ func CreateOrUpdate(ctx context.Context, cr *vmv1beta1.VMSingle, rclient client. } } - if cr.Spec.Storage != nil && cr.Spec.StorageDataPath == "" { + if cr.Spec.Storage != nil { if err := createStorage(ctx, rclient, cr, prevCR); err != nil { return fmt.Errorf("cannot create storage: %w", err) } @@ -154,12 +154,7 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS args = append(args, fmt.Sprintf("-retentionPeriod=%s", cr.Spec.RetentionPeriod)) } - // if customStorageDataPath is not empty, do not add volumes - // and volumeMounts - // it's user responsibility to provide correct values - mustAddVolumeMounts := cr.Spec.StorageDataPath == "" - - storagePath := vmSingleDataDir + storagePath := dataDataDir if cr.Spec.StorageDataPath != "" { storagePath = cr.Spec.StorageDataPath } @@ -191,8 +186,17 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS var volumes []corev1.Volume var vmMounts []corev1.VolumeMount - volumes, vmMounts = addVolumeMountsTo(volumes, vmMounts, cr, mustAddVolumeMounts, storagePath) + volumes = append(volumes, cr.Spec.Volumes...) + vmMounts = append(vmMounts, cr.Spec.VolumeMounts...) + var pvcSpec *corev1.PersistentVolumeClaimVolumeSource + if cr.Spec.Storage != nil { + pvcSpec = &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: cr.PrefixedName(), + } + } + + volumes, vmMounts = build.StorageVolumeMountsTo(volumes, vmMounts, pvcSpec, dataVolumeName, storagePath) if cr.Spec.VMBackup != nil && cr.Spec.VMBackup.CredentialsSecret != nil { volumes = append(volumes, corev1.Volume{ Name: k8stools.SanitizeVolumeName("secret-" + cr.Spec.VMBackup.CredentialsSecret.Name), @@ -204,9 +208,6 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS }) } - volumes = append(volumes, cr.Spec.Volumes...) - vmMounts = append(vmMounts, cr.Spec.VolumeMounts...) 
- for _, s := range cr.Spec.Secrets { volumes = append(volumes, corev1.Volume{ Name: k8stools.SanitizeVolumeName("secret-" + s), @@ -275,7 +276,7 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS var initContainers []corev1.Container if cr.Spec.VMBackup != nil { - vmBackupManagerContainer, err := build.VMBackupManager(ctx, cr.Spec.VMBackup, cr.Spec.Port, storagePath, vmDataVolumeName, cr.Spec.ExtraArgs, false, cr.Spec.License) + vmBackupManagerContainer, err := build.VMBackupManager(ctx, cr.Spec.VMBackup, cr.Spec.Port, storagePath, dataVolumeName, cr.Spec.ExtraArgs, false, cr.Spec.License) if err != nil { return nil, err } @@ -285,7 +286,7 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS if cr.Spec.VMBackup.Restore != nil && cr.Spec.VMBackup.Restore.OnStart != nil && cr.Spec.VMBackup.Restore.OnStart.Enabled { - vmRestore, err := build.VMRestore(cr.Spec.VMBackup, storagePath, vmDataVolumeName) + vmRestore, err := build.VMRestore(cr.Spec.VMBackup, storagePath, dataVolumeName) if err != nil { return nil, err } @@ -455,58 +456,3 @@ func deleteOrphaned(ctx context.Context, rclient client.Client, cr *vmv1beta1.VM } return nil } - -func addVolumeMountsTo(volumes []corev1.Volume, vmMounts []corev1.VolumeMount, cr *vmv1beta1.VMSingle, mustAddVolumeMounts bool, storagePath string) ([]corev1.Volume, []corev1.VolumeMount) { - - switch { - case mustAddVolumeMounts: - // add volume and mount point by operator directly - vmMounts = append(vmMounts, corev1.VolumeMount{ - Name: vmDataVolumeName, - MountPath: storagePath}, - ) - - vlSource := corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - } - if cr.Spec.Storage != nil { - vlSource = corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: cr.PrefixedName(), - }, - } - } - volumes = append(volumes, corev1.Volume{ - Name: vmDataVolumeName, - VolumeSource: vlSource}) - - case len(cr.Spec.Volumes) > 0: - // add missing volumeMount point for backward compatibility - // it simplifies management of external PVCs - var volumeNamePresent bool - for _, volume := range cr.Spec.Volumes { - if volume.Name == vmDataVolumeName { - volumeNamePresent = true - break - } - } - if volumeNamePresent { - var mustSkipVolumeAdd bool - for _, volumeMount := range cr.Spec.VolumeMounts { - if volumeMount.Name == vmDataVolumeName { - mustSkipVolumeAdd = true - break - } - } - if !mustSkipVolumeAdd { - vmMounts = append(vmMounts, corev1.VolumeMount{ - Name: vmDataVolumeName, - MountPath: storagePath, - }) - } - } - - } - - return volumes, vmMounts -} diff --git a/internal/controller/operator/factory/vtsingle/vtsingle.go b/internal/controller/operator/factory/vtsingle/vtsingle.go index 66dcfd90c..ac92c0b7c 100644 --- a/internal/controller/operator/factory/vtsingle/vtsingle.go +++ b/internal/controller/operator/factory/vtsingle/vtsingle.go @@ -24,8 +24,8 @@ import ( ) const ( - vtsingleDataDir = "/victoria-traces-data" - vtsingleDataVolumeName = "data" + dataDataDir = "/victoria-traces-data" + dataVolumeName = "data" tlsServerConfigMountPath = "/etc/vm/tls-server-secrets" ) @@ -69,7 +69,7 @@ func CreateOrUpdate(ctx context.Context, rclient client.Client, cr *vmv1.VTSingl return err } } - if cr.Spec.Storage != nil && cr.Spec.StorageDataPath == "" { + if cr.Spec.Storage != nil { if err := createOrUpdatePVC(ctx, rclient, cr, prevCR); err != nil { return err } @@ -156,10 +156,7 @@ func makePodSpec(r *vmv1.VTSingle) (*corev1.PodTemplateSpec, error) { args = 
append(args, fmt.Sprintf("-retention.maxDiskSpaceUsageBytes=%s", r.Spec.RetentionMaxDiskSpaceUsageBytes)) } - // if customStorageDataPath is not empty, do not add pvc. - shouldAddPVC := r.Spec.StorageDataPath == "" - - storagePath := vtsingleDataDir + storagePath := dataDataDir if r.Spec.StorageDataPath != "" { storagePath = r.Spec.StorageDataPath } @@ -193,36 +190,17 @@ func makePodSpec(r *vmv1.VTSingle) (*corev1.PodTemplateSpec, error) { var ports []corev1.ContainerPort ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(r.Spec.Port).IntVal}) - volumes := []corev1.Volume{} - - storageSpec := r.Spec.Storage - - if storageSpec == nil { - volumes = append(volumes, corev1.Volume{ - Name: vtsingleDataVolumeName, - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }) - } else if shouldAddPVC { - volumes = append(volumes, corev1.Volume{ - Name: vtsingleDataVolumeName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: r.PrefixedName(), - }, - }, - }) - } + var volumes []corev1.Volume + var vmMounts []corev1.VolumeMount volumes = append(volumes, r.Spec.Volumes...) - vmMounts := []corev1.VolumeMount{ - { - Name: vtsingleDataVolumeName, - MountPath: storagePath, - }, - } - vmMounts = append(vmMounts, r.Spec.VolumeMounts...) + var pvcSrc *corev1.PersistentVolumeClaimVolumeSource + if r.Spec.Storage != nil { + pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: r.PrefixedName(), + } + } + volumes, vmMounts = build.StorageVolumeMountsTo(volumes, vmMounts, pvcSrc, dataVolumeName, storagePath) for _, s := range r.Spec.Secrets { volumes = append(volumes, corev1.Volume{ diff --git a/test/e2e/vlsingle_test.go b/test/e2e/vlsingle_test.go index bc8cf17bf..fbc84c971 100644 --- a/test/e2e/vlsingle_test.go +++ b/test/e2e/vlsingle_test.go @@ -145,7 +145,6 @@ var _ = Describe("test vlsingle Controller", Label("vl", "single", "vlsingle"), }, RetentionPeriod: "1", StorageDataPath: "/custom-path/internal/dir", - Storage: &corev1.PersistentVolumeClaimSpec{}, }, }, func(cr *vmv1.VLSingle) { @@ -156,8 +155,8 @@ var _ = Describe("test vlsingle Controller", Label("vl", "single", "vlsingle"), Expect(ts.Containers).To(HaveLen(1)) Expect(ts.Volumes).To(HaveLen(2)) Expect(ts.Containers[0].VolumeMounts).To(HaveLen(2)) - Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("data")) - Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("unused")) + Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("unused")) + Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("data")) }), ) diff --git a/test/e2e/vmsingle_test.go b/test/e2e/vmsingle_test.go index a50828cf2..9a102f52f 100644 --- a/test/e2e/vmsingle_test.go +++ b/test/e2e/vmsingle_test.go @@ -215,7 +215,7 @@ var _ = Describe("test vmsingle Controller", Label("vm", "single"), func() { Expect(*createdDeploy.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot).To(BeTrue()) }), - Entry("with data emptyDir", "emptydir", false, + Entry("with storage", "storage", false, &vmv1beta1.VMSingle{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -245,8 +245,34 @@ var _ = Describe("test vmsingle Controller", Label("vm", "single"), func() { Expect(k8sClient.Get(ctx, createdChildObjects, &createdDeploy)).To(Succeed()) ts := createdDeploy.Spec.Template.Spec Expect(ts.Containers).To(HaveLen(1)) - Expect(ts.Volumes).To(BeEmpty()) - Expect(ts.Containers[0].VolumeMounts).To(BeEmpty()) + 
Expect(ts.Volumes).To(HaveLen(1)) + Expect(ts.Containers[0].VolumeMounts).To(HaveLen(1)) + }), + Entry("with empty dir", "emptydir", false, + &vmv1beta1.VMSingle{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + }, + Spec: vmv1beta1.VMSingleSpec{ + CommonApplicationDeploymentParams: vmv1beta1.CommonApplicationDeploymentParams{ + ReplicaCount: ptr.To[int32](1), + }, + CommonDefaultableParams: vmv1beta1.CommonDefaultableParams{ + UseStrictSecurity: ptr.To(false), + }, + RetentionPeriod: "1", + RemovePvcAfterDelete: true, + StorageDataPath: "/tmp/", + }, + }, + func(cr *vmv1beta1.VMSingle) { + createdChildObjects := types.NamespacedName{Namespace: namespace, Name: cr.PrefixedName()} + var createdDeploy appsv1.Deployment + Expect(k8sClient.Get(ctx, createdChildObjects, &createdDeploy)).To(Succeed()) + ts := createdDeploy.Spec.Template.Spec + Expect(ts.Containers).To(HaveLen(1)) + Expect(ts.Volumes).To(HaveLen(1)) + Expect(ts.Containers[0].VolumeMounts).To(HaveLen(1)) }), Entry("with external volume", "externalvolume", true, &vmv1beta1.VMSingle{ @@ -289,7 +315,6 @@ var _ = Describe("test vmsingle Controller", Label("vm", "single"), func() { RetentionPeriod: "1", RemovePvcAfterDelete: true, StorageDataPath: "/custom-path/internal/dir", - Storage: &corev1.PersistentVolumeClaimSpec{}, VMBackup: &vmv1beta1.VMBackup{ Destination: "fs:///opt/backup", VolumeMounts: []corev1.VolumeMount{{Name: "backup", MountPath: "/opt/backup"}}, @@ -304,7 +329,8 @@ var _ = Describe("test vmsingle Controller", Label("vm", "single"), func() { Expect(ts.Containers).To(HaveLen(2)) Expect(ts.Volumes).To(HaveLen(4)) Expect(ts.Containers[0].VolumeMounts).To(HaveLen(3)) - Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("data")) + Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("unused")) + Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("data")) Expect(ts.Containers[1].VolumeMounts).To(HaveLen(3)) Expect(ts.Containers[1].VolumeMounts[0].Name).To(Equal("data")) Expect(ts.Containers[1].VolumeMounts[1].Name).To(Equal("backup")) diff --git a/test/e2e/vtsingle_test.go b/test/e2e/vtsingle_test.go index 1c4ef97eb..239c326c1 100644 --- a/test/e2e/vtsingle_test.go +++ b/test/e2e/vtsingle_test.go @@ -145,7 +145,6 @@ var _ = Describe("test vtsingle Controller", Label("vt", "single", "vtsingle"), }, RetentionPeriod: "1", StorageDataPath: "/custom-path/internal/dir", - Storage: &corev1.PersistentVolumeClaimSpec{}, }, }, func(cr *vmv1.VTSingle) { @@ -156,8 +155,8 @@ var _ = Describe("test vtsingle Controller", Label("vt", "single", "vtsingle"), Expect(ts.Containers).To(HaveLen(1)) Expect(ts.Volumes).To(HaveLen(2)) Expect(ts.Containers[0].VolumeMounts).To(HaveLen(2)) - Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("data")) - Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("unused")) + Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("unused")) + Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("data")) }), ) From 31307026e3d6871b3161f4ed59f7f1947a7ca8d5 Mon Sep 17 00:00:00 2001 From: Andrii Chubatiuk Date: Wed, 3 Dec 2025 08:28:46 +0200 Subject: [PATCH 2/2] changed order of volume and mounts for backward compatibility --- .../operator/factory/build/backup.go | 26 +---- .../operator/factory/build/container.go | 70 ++++++------ .../operator/factory/build/container_test.go | 108 ++++++++++++------ .../operator/factory/vlsingle/vlogs.go | 38 ++---- .../operator/factory/vlsingle/vlsingle.go | 10 +- .../operator/factory/vmcluster/vmcluster.go | 5 +- 
.../operator/factory/vmsingle/vmsingle.go | 22 ++-- .../operator/factory/vtsingle/vtsingle.go | 10 +- test/e2e/vlsingle_test.go | 4 +- test/e2e/vmsingle_test.go | 4 +- test/e2e/vtsingle_test.go | 4 +- 11 files changed, 147 insertions(+), 154 deletions(-) diff --git a/internal/controller/operator/factory/build/backup.go b/internal/controller/operator/factory/build/backup.go index 35ba458a7..9924976e4 100644 --- a/internal/controller/operator/factory/build/backup.go +++ b/internal/controller/operator/factory/build/backup.go @@ -21,7 +21,8 @@ func VMBackupManager( ctx context.Context, cr *vmv1beta1.VMBackup, port string, - storagePath, dataVolumeName string, + storagePath string, + mounts []corev1.VolumeMount, extraArgs map[string]string, isCluster bool, license *vmv1beta1.License, @@ -85,16 +86,6 @@ func VMBackupManager( var ports []corev1.ContainerPort ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(cr.Port).IntVal}) - - mounts := []corev1.VolumeMount{ - { - Name: dataVolumeName, - MountPath: storagePath, - ReadOnly: false, - }, - } - mounts = append(mounts, cr.VolumeMounts...) - if cr.CredentialsSecret != nil { mounts = append(mounts, corev1.VolumeMount{ Name: k8stools.SanitizeVolumeName("secret-" + cr.CredentialsSecret.Name), @@ -171,9 +162,9 @@ func VMBackupManager( // VMRestore conditionally creates vmrestore container func VMRestore( cr *vmv1beta1.VMBackup, - storagePath, dataVolumeName string, + storagePath string, + mounts []corev1.VolumeMount, ) (*corev1.Container, error) { - args := []string{ fmt.Sprintf("-storageDataPath=%s", storagePath), "-eula", @@ -198,15 +189,6 @@ func VMRestore( var ports []corev1.ContainerPort ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(cr.Port).IntVal}) - mounts := []corev1.VolumeMount{ - { - Name: dataVolumeName, - MountPath: storagePath, - ReadOnly: false, - }, - } - mounts = append(mounts, cr.VolumeMounts...) - if cr.CredentialsSecret != nil { mounts = append(mounts, corev1.VolumeMount{ Name: k8stools.SanitizeVolumeName("secret-" + cr.CredentialsSecret.Name), diff --git a/internal/controller/operator/factory/build/container.go b/internal/controller/operator/factory/build/container.go index 93cdc326b..fed739b5d 100644 --- a/internal/controller/operator/factory/build/container.go +++ b/internal/controller/operator/factory/build/container.go @@ -2,6 +2,7 @@ package build import ( "fmt" + "path/filepath" "strings" corev1 "k8s.io/api/core/v1" @@ -15,6 +16,7 @@ import ( ) const probeTimeoutSeconds int32 = 5 +const DataVolumeName = "data" type probeCRD interface { Probe() *vmv1beta1.EmbeddedProbes @@ -536,46 +538,48 @@ func AddSyslogTLSConfigToVolumes(dstVolumes []corev1.Volume, dstMounts []corev1. 
return dstVolumes, dstMounts } -func StorageVolumeMountsTo(volumes []corev1.Volume, mounts []corev1.VolumeMount, pvcSrc *corev1.PersistentVolumeClaimVolumeSource, volumeName, storagePath string) ([]corev1.Volume, []corev1.VolumeMount) { - var alreadyMounted bool +func StorageVolumeMountsTo(volumes []corev1.Volume, mounts []corev1.VolumeMount, pvcSrc *corev1.PersistentVolumeClaimVolumeSource, storagePath, dataVolumeName string) ([]corev1.Volume, []corev1.VolumeMount, error) { + foundMount := false for _, volumeMount := range mounts { - if volumeMount.Name == volumeName { - alreadyMounted = true - break + rel, err := filepath.Rel(volumeMount.MountPath, storagePath) + if err == nil && !strings.HasPrefix(rel, "..") { + if volumeMount.Name == dataVolumeName { + foundMount = true + break + } + return nil, nil, fmt.Errorf( + "unexpected volume=%q mounted to path=%q, which is reserved for volume=%q, path=%q", + volumeMount.Name, volumeMount.MountPath, dataVolumeName, storagePath) + } else { + if volumeMount.Name != dataVolumeName { + continue + } + return nil, nil, fmt.Errorf( + "unexpected volume=%q mounted to path=%q, expected path=%q", + volumeMount.Name, volumeMount.MountPath, dataVolumeName) } } - if !alreadyMounted { - mounts = append(mounts, corev1.VolumeMount{ - Name: volumeName, + if !foundMount { + mounts = append([]corev1.VolumeMount{{ + Name: dataVolumeName, MountPath: storagePath, - }) - } - if pvcSrc != nil { - volumes = append(volumes, corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: pvcSrc, - }, - }) - return volumes, mounts + }}, mounts...) } - var volumePresent bool for _, volume := range volumes { - if volume.Name == volumeName { - volumePresent = true - break + if volume.Name == dataVolumeName { + return volumes, mounts, nil } } - if volumePresent { - return volumes, mounts - } - - volumes = append(volumes, corev1.Volume{ - Name: volumeName, - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }) - return volumes, mounts + var source corev1.VolumeSource + if pvcSrc != nil { + source.PersistentVolumeClaim = pvcSrc + } else { + source.EmptyDir = &corev1.EmptyDirVolumeSource{} + } + volumes = append([]corev1.Volume{{ + Name: dataVolumeName, + VolumeSource: source, + }}, volumes...) 
+ return volumes, mounts, nil } diff --git a/internal/controller/operator/factory/build/container_test.go b/internal/controller/operator/factory/build/container_test.go index 32e5ce653..8b0b80d46 100644 --- a/internal/controller/operator/factory/build/container_test.go +++ b/internal/controller/operator/factory/build/container_test.go @@ -353,45 +353,48 @@ func TestAddSyslogArgsTo(t *testing.T) { func TestStorageVolumeMountsTo(t *testing.T) { type opts struct { pvcSrc *corev1.PersistentVolumeClaimVolumeSource - volumeName string storagePath string volumes []corev1.Volume expectedVolumes []corev1.Volume mounts []corev1.VolumeMount expectedMounts []corev1.VolumeMount + wantErr bool } f := func(o opts) { t.Helper() - gotVolumes, gotMounts := StorageVolumeMountsTo(o.volumes, o.mounts, o.pvcSrc, o.volumeName, o.storagePath) + gotVolumes, gotMounts, err := StorageVolumeMountsTo(o.volumes, o.mounts, o.pvcSrc, o.storagePath, DataVolumeName) assert.Equal(t, o.expectedMounts, gotMounts) assert.Equal(t, o.expectedVolumes, gotVolumes) + if o.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } } // no PVC spec and no volumes and mounts f(opts{ - volumeName: "test", storagePath: "/test", expectedVolumes: []corev1.Volume{{ - Name: "test", + Name: DataVolumeName, VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{}, }, }}, expectedMounts: []corev1.VolumeMount{{ - Name: "test", + Name: DataVolumeName, MountPath: "/test", }}, }) // with PVC spec and no volumes and mounts f(opts{ - volumeName: "test", storagePath: "/test", pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: "test-claim", }, expectedVolumes: []corev1.Volume{{ - Name: "test", + Name: DataVolumeName, VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: "test-claim", @@ -399,7 +402,7 @@ func TestStorageVolumeMountsTo(t *testing.T) { }, }}, expectedMounts: []corev1.VolumeMount{{ - Name: "test", + Name: DataVolumeName, MountPath: "/test", }}, }) @@ -407,38 +410,29 @@ func TestStorageVolumeMountsTo(t *testing.T) { // with PVC spec and matching data volume f(opts{ volumes: []corev1.Volume{{ - Name: "test", + Name: DataVolumeName, VolumeSource: corev1.VolumeSource{ AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ VolumeID: "aws-volume", }, }, }}, - volumeName: "test", storagePath: "/test", pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: "test-claim", }, expectedVolumes: []corev1.Volume{ { - Name: "test", + Name: DataVolumeName, VolumeSource: corev1.VolumeSource{ AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ VolumeID: "aws-volume", }, }, }, - { - Name: "test", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "test-claim", - }, - }, - }, }, expectedMounts: []corev1.VolumeMount{{ - Name: "test", + Name: DataVolumeName, MountPath: "/test", }}, }) @@ -453,31 +447,30 @@ func TestStorageVolumeMountsTo(t *testing.T) { }, }, }}, - volumeName: "test", storagePath: "/test", pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: "test-claim", }, expectedVolumes: []corev1.Volume{ { - Name: "extra", + Name: DataVolumeName, VolumeSource: corev1.VolumeSource{ - AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ - VolumeID: "aws-volume", + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", }, }, }, { - Name: "test", + Name: "extra", VolumeSource: corev1.VolumeSource{ - 
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "test-claim", + AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws-volume", }, }, }, }, expectedMounts: []corev1.VolumeMount{{ - Name: "test", + Name: DataVolumeName, MountPath: "/test", }}, }) @@ -493,35 +486,76 @@ func TestStorageVolumeMountsTo(t *testing.T) { }, }}, mounts: []corev1.VolumeMount{{ - Name: "test", + Name: DataVolumeName, MountPath: "/other-path", }}, - volumeName: "test", + wantErr: true, storagePath: "/test", pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: "test-claim", }, + }) + + // with PVC spec and intersecting data volume mount + f(opts{ + volumes: []corev1.Volume{{ + Name: "extra", + VolumeSource: corev1.VolumeSource{ + AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws-volume", + }, + }, + }}, + mounts: []corev1.VolumeMount{{ + Name: DataVolumeName, + MountPath: "/test", + }}, + storagePath: "/test/data", + pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", + }, expectedVolumes: []corev1.Volume{ { - Name: "extra", + Name: DataVolumeName, VolumeSource: corev1.VolumeSource{ - AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ - VolumeID: "aws-volume", + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", }, }, }, { - Name: "test", + Name: "extra", VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "test-claim", + AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws-volume", }, }, }, }, expectedMounts: []corev1.VolumeMount{{ + Name: DataVolumeName, + MountPath: "/test", + }}, + }) + + // with PVC spec and intersecting volume mount and absent volume + f(opts{ + volumes: []corev1.Volume{{ + Name: "test", + VolumeSource: corev1.VolumeSource{ + AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws-volume", + }, + }, + }}, + mounts: []corev1.VolumeMount{{ Name: "test", - MountPath: "/other-path", + MountPath: "/test", }}, + storagePath: "/test/data", + pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-claim", + }, + wantErr: true, }) } diff --git a/internal/controller/operator/factory/vlsingle/vlogs.go b/internal/controller/operator/factory/vlsingle/vlogs.go index 61db98bac..60fb440e0 100644 --- a/internal/controller/operator/factory/vlsingle/vlogs.go +++ b/internal/controller/operator/factory/vlsingle/vlogs.go @@ -147,9 +147,6 @@ func makeVLogsPodSpec(r *vmv1beta1.VLogs) (*corev1.PodTemplateSpec, error) { fmt.Sprintf("-retentionPeriod=%s", r.Spec.RetentionPeriod), } - // if customStorageDataPath is not empty, do not add pvc. 
- shouldAddPVC := r.Spec.StorageDataPath == "" - storagePath := dataDataDir if r.Spec.StorageDataPath != "" { storagePath = r.Spec.StorageDataPath @@ -180,37 +177,18 @@ func makeVLogsPodSpec(r *vmv1beta1.VLogs) (*corev1.PodTemplateSpec, error) { var ports []corev1.ContainerPort ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(r.Spec.Port).IntVal}) - volumes := []corev1.Volume{} - - storageSpec := r.Spec.Storage - if storageSpec == nil { - volumes = append(volumes, corev1.Volume{ - Name: dataVolumeName, - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }) - } else if shouldAddPVC { - volumes = append(volumes, corev1.Volume{ - Name: dataVolumeName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: r.PrefixedName(), - }, - }, - }) + var pvcSrc *corev1.PersistentVolumeClaimVolumeSource + if r.Spec.Storage != nil { + pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: r.PrefixedName(), + } } - volumes = append(volumes, r.Spec.Volumes...) - vmMounts := []corev1.VolumeMount{ - { - Name: dataVolumeName, - MountPath: storagePath, - }, + volumes, vmMounts, err := build.StorageVolumeMountsTo(r.Spec.Volumes, r.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName) + if err != nil { + return nil, err } - vmMounts = append(vmMounts, r.Spec.VolumeMounts...) - for _, s := range r.Spec.Secrets { volumes = append(volumes, corev1.Volume{ Name: k8stools.SanitizeVolumeName("secret-" + s), diff --git a/internal/controller/operator/factory/vlsingle/vlsingle.go b/internal/controller/operator/factory/vlsingle/vlsingle.go index 4ad8e793b..847c1b617 100644 --- a/internal/controller/operator/factory/vlsingle/vlsingle.go +++ b/internal/controller/operator/factory/vlsingle/vlsingle.go @@ -25,7 +25,6 @@ import ( const ( dataDataDir = "/victoria-logs-data" - dataVolumeName = "data" tlsServerConfigMountPath = "/etc/vm/tls-server-secrets" ) @@ -190,10 +189,6 @@ func makePodSpec(r *vmv1.VLSingle) (*corev1.PodTemplateSpec, error) { var ports []corev1.ContainerPort ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(r.Spec.Port).IntVal}) - var volumes []corev1.Volume - var vmMounts []corev1.VolumeMount - volumes = append(volumes, r.Spec.Volumes...) - vmMounts = append(vmMounts, r.Spec.VolumeMounts...) var pvcSrc *corev1.PersistentVolumeClaimVolumeSource if r.Spec.Storage != nil { @@ -201,7 +196,10 @@ func makePodSpec(r *vmv1.VLSingle) (*corev1.PodTemplateSpec, error) { ClaimName: r.PrefixedName(), } } - volumes, vmMounts = build.StorageVolumeMountsTo(volumes, vmMounts, pvcSrc, dataVolumeName, storagePath) + volumes, vmMounts, err := build.StorageVolumeMountsTo(r.Spec.Volumes, r.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName) + if err != nil { + return nil, err + } for _, s := range r.Spec.Secrets { volumes = append(volumes, corev1.Volume{ diff --git a/internal/controller/operator/factory/vmcluster/vmcluster.go b/internal/controller/operator/factory/vmcluster/vmcluster.go index 3fd18618b..5a3432779 100644 --- a/internal/controller/operator/factory/vmcluster/vmcluster.go +++ b/internal/controller/operator/factory/vmcluster/vmcluster.go @@ -988,6 +988,7 @@ func makePodSpecForVMStorage(ctx context.Context, cr *vmv1beta1.VMCluster) (*cor args = append(args, fmt.Sprintf("-storageDataPath=%s", cr.Spec.VMStorage.StorageDataPath)) vmMounts = append(vmMounts, cr.Spec.VMStorage.VolumeMounts...) 
+ commonMounts := vmMounts for _, s := range cr.Spec.VMStorage.Secrets { volumes = append(volumes, corev1.Volume{ @@ -1048,7 +1049,7 @@ func makePodSpecForVMStorage(ctx context.Context, cr *vmv1beta1.VMCluster) (*cor var initContainers []corev1.Container if cr.Spec.VMStorage.VMBackup != nil { - vmBackupManagerContainer, err := build.VMBackupManager(ctx, cr.Spec.VMStorage.VMBackup, cr.Spec.VMStorage.Port, cr.Spec.VMStorage.StorageDataPath, cr.Spec.VMStorage.GetStorageVolumeName(), cr.Spec.VMStorage.ExtraArgs, true, cr.Spec.License) + vmBackupManagerContainer, err := build.VMBackupManager(ctx, cr.Spec.VMStorage.VMBackup, cr.Spec.VMStorage.Port, cr.Spec.VMStorage.StorageDataPath, commonMounts, cr.Spec.VMStorage.ExtraArgs, true, cr.Spec.License) if err != nil { return nil, err } @@ -1058,7 +1059,7 @@ func makePodSpecForVMStorage(ctx context.Context, cr *vmv1beta1.VMCluster) (*cor if cr.Spec.VMStorage.VMBackup.Restore != nil && cr.Spec.VMStorage.VMBackup.Restore.OnStart != nil && cr.Spec.VMStorage.VMBackup.Restore.OnStart.Enabled { - vmRestore, err := build.VMRestore(cr.Spec.VMStorage.VMBackup, cr.Spec.VMStorage.StorageDataPath, cr.Spec.VMStorage.GetStorageVolumeName()) + vmRestore, err := build.VMRestore(cr.Spec.VMStorage.VMBackup, cr.Spec.VMStorage.StorageDataPath, commonMounts) if err != nil { return nil, err } diff --git a/internal/controller/operator/factory/vmsingle/vmsingle.go b/internal/controller/operator/factory/vmsingle/vmsingle.go index d7e104b93..9b12582dd 100644 --- a/internal/controller/operator/factory/vmsingle/vmsingle.go +++ b/internal/controller/operator/factory/vmsingle/vmsingle.go @@ -25,7 +25,6 @@ import ( const ( dataDataDir = "/victoria-metrics-data" - dataVolumeName = "data" streamAggrSecretKey = "config.yaml" ) @@ -183,20 +182,19 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(cr.Spec.Port).IntVal}) ports = build.AppendInsertPorts(ports, cr.Spec.InsertPorts) - var volumes []corev1.Volume - var vmMounts []corev1.VolumeMount - - volumes = append(volumes, cr.Spec.Volumes...) - vmMounts = append(vmMounts, cr.Spec.VolumeMounts...) 
- - var pvcSpec *corev1.PersistentVolumeClaimVolumeSource + var pvcSrc *corev1.PersistentVolumeClaimVolumeSource if cr.Spec.Storage != nil { - pvcSpec = &corev1.PersistentVolumeClaimVolumeSource{ + pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: cr.PrefixedName(), } } - volumes, vmMounts = build.StorageVolumeMountsTo(volumes, vmMounts, pvcSpec, dataVolumeName, storagePath) + volumes, vmMounts, err := build.StorageVolumeMountsTo(cr.Spec.Volumes, cr.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName) + if err != nil { + return nil, err + } + commonMounts := vmMounts + if cr.Spec.VMBackup != nil && cr.Spec.VMBackup.CredentialsSecret != nil { volumes = append(volumes, corev1.Volume{ Name: k8stools.SanitizeVolumeName("secret-" + cr.Spec.VMBackup.CredentialsSecret.Name), @@ -276,7 +274,7 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS var initContainers []corev1.Container if cr.Spec.VMBackup != nil { - vmBackupManagerContainer, err := build.VMBackupManager(ctx, cr.Spec.VMBackup, cr.Spec.Port, storagePath, dataVolumeName, cr.Spec.ExtraArgs, false, cr.Spec.License) + vmBackupManagerContainer, err := build.VMBackupManager(ctx, cr.Spec.VMBackup, cr.Spec.Port, storagePath, commonMounts, cr.Spec.ExtraArgs, false, cr.Spec.License) if err != nil { return nil, err } @@ -286,7 +284,7 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS if cr.Spec.VMBackup.Restore != nil && cr.Spec.VMBackup.Restore.OnStart != nil && cr.Spec.VMBackup.Restore.OnStart.Enabled { - vmRestore, err := build.VMRestore(cr.Spec.VMBackup, storagePath, dataVolumeName) + vmRestore, err := build.VMRestore(cr.Spec.VMBackup, storagePath, commonMounts) if err != nil { return nil, err } diff --git a/internal/controller/operator/factory/vtsingle/vtsingle.go b/internal/controller/operator/factory/vtsingle/vtsingle.go index ac92c0b7c..7cd8a70bc 100644 --- a/internal/controller/operator/factory/vtsingle/vtsingle.go +++ b/internal/controller/operator/factory/vtsingle/vtsingle.go @@ -25,7 +25,6 @@ import ( const ( dataDataDir = "/victoria-traces-data" - dataVolumeName = "data" tlsServerConfigMountPath = "/etc/vm/tls-server-secrets" ) @@ -190,17 +189,16 @@ func makePodSpec(r *vmv1.VTSingle) (*corev1.PodTemplateSpec, error) { var ports []corev1.ContainerPort ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(r.Spec.Port).IntVal}) - var volumes []corev1.Volume - var vmMounts []corev1.VolumeMount - volumes = append(volumes, r.Spec.Volumes...) - vmMounts = append(vmMounts, r.Spec.VolumeMounts...) 
var pvcSrc *corev1.PersistentVolumeClaimVolumeSource if r.Spec.Storage != nil { pvcSrc = &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: r.PrefixedName(), } } - volumes, vmMounts = build.StorageVolumeMountsTo(volumes, vmMounts, pvcSrc, dataVolumeName, storagePath) + volumes, vmMounts, err := build.StorageVolumeMountsTo(r.Spec.Volumes, r.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName) + if err != nil { + return nil, err + } for _, s := range r.Spec.Secrets { volumes = append(volumes, corev1.Volume{ diff --git a/test/e2e/vlsingle_test.go b/test/e2e/vlsingle_test.go index fbc84c971..889608fa6 100644 --- a/test/e2e/vlsingle_test.go +++ b/test/e2e/vlsingle_test.go @@ -155,8 +155,8 @@ var _ = Describe("test vlsingle Controller", Label("vl", "single", "vlsingle"), Expect(ts.Containers).To(HaveLen(1)) Expect(ts.Volumes).To(HaveLen(2)) Expect(ts.Containers[0].VolumeMounts).To(HaveLen(2)) - Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("unused")) - Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("data")) + Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("data")) + Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("unused")) }), ) diff --git a/test/e2e/vmsingle_test.go b/test/e2e/vmsingle_test.go index 9a102f52f..4e1de24a2 100644 --- a/test/e2e/vmsingle_test.go +++ b/test/e2e/vmsingle_test.go @@ -329,8 +329,8 @@ var _ = Describe("test vmsingle Controller", Label("vm", "single"), func() { Expect(ts.Containers).To(HaveLen(2)) Expect(ts.Volumes).To(HaveLen(4)) Expect(ts.Containers[0].VolumeMounts).To(HaveLen(3)) - Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("unused")) - Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("data")) + Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("data")) + Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("unused")) Expect(ts.Containers[1].VolumeMounts).To(HaveLen(3)) Expect(ts.Containers[1].VolumeMounts[0].Name).To(Equal("data")) Expect(ts.Containers[1].VolumeMounts[1].Name).To(Equal("backup")) diff --git a/test/e2e/vtsingle_test.go b/test/e2e/vtsingle_test.go index 239c326c1..e5756c7f6 100644 --- a/test/e2e/vtsingle_test.go +++ b/test/e2e/vtsingle_test.go @@ -155,8 +155,8 @@ var _ = Describe("test vtsingle Controller", Label("vt", "single", "vtsingle"), Expect(ts.Containers).To(HaveLen(1)) Expect(ts.Volumes).To(HaveLen(2)) Expect(ts.Containers[0].VolumeMounts).To(HaveLen(2)) - Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("unused")) - Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("data")) + Expect(ts.Containers[0].VolumeMounts[0].Name).To(Equal("data")) + Expect(ts.Containers[0].VolumeMounts[1].Name).To(Equal("unused")) }), )
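The behavior introduced by this patch is easiest to see from the user side. Below is a minimal sketch of a `VMSingle` manifest that relies on it: a user-supplied volume named `data` backed by a pre-existing PVC is mounted at the default storage path, and the operator no longer adds its own emptyDir or requires a `storageDataPath` override. The names `example` and `existing-pvc` are placeholders and are not part of this patch.

```yaml
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMSingle
metadata:
  name: example            # placeholder name
spec:
  retentionPeriod: "1"
  # A volume named "data" is treated as the storage data volume,
  # so the operator mounts it at the default data path instead of
  # appending an extra emptyDir volume for storage.
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: existing-pvc   # hypothetical, pre-provisioned PVC
```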