diff --git a/api/v1alpha1/testrun_types.go b/api/v1alpha1/testrun_types.go
index c8894201..280ac002 100644
--- a/api/v1alpha1/testrun_types.go
+++ b/api/v1alpha1/testrun_types.go
@@ -122,6 +122,11 @@ type TestRunSpec struct {
Cleanup Cleanup `json:"cleanup,omitempty"`
+ // TTLSecondsAfterFinished, when set, specifies the TTL for Jobs created by this TestRun
+ // after they finish successfully or fail. Mirrors Job's TTLSecondsAfterFinished behavior.
+ // +kubebuilder:validation:Minimum=0
+ TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"`
+
// TestRunID is reserved by Grafana Cloud k6. Do not set it manually.
TestRunID string `json:"testRunId,omitempty"` // PLZ reserved field
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 10d98b81..80dfc5b5 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -496,6 +496,11 @@ func (in *TestRunSpec) DeepCopyInto(out *TestRunSpec) {
in.Starter.DeepCopyInto(&out.Starter)
in.Runner.DeepCopyInto(&out.Runner)
out.Scuttle = in.Scuttle
+ if in.TTLSecondsAfterFinished != nil {
+ in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished
+ *out = new(int32)
+ **out = **in
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestRunSpec.
diff --git a/config/crd/bases/k6.io_testruns.yaml b/config/crd/bases/k6.io_testruns.yaml
index 24283c04..e9def0fc 100644
--- a/config/crd/bases/k6.io_testruns.yaml
+++ b/config/crd/bases/k6.io_testruns.yaml
@@ -5880,6 +5880,10 @@ spec:
type: string
token:
type: string
+ ttlSecondsAfterFinished:
+ format: int32
+ minimum: 0
+ type: integer
required:
- parallelism
- script
diff --git a/config/samples/k6_v1alpha1_configmap.yaml b/config/samples/k6_v1alpha1_configmap.yaml
index 1aa75a47..8934adf3 100644
--- a/config/samples/k6_v1alpha1_configmap.yaml
+++ b/config/samples/k6_v1alpha1_configmap.yaml
@@ -30,3 +30,16 @@ data:
failRate.add(result.status !== 200);
sleep(1);
}
+
+---
+apiVersion: k6.io/v1alpha1
+kind: TestRun
+metadata:
+  name: testrun-sample
+spec:
+  parallelism: 2
+  ttlSecondsAfterFinished: 600
+  script:
+    configMap:
+      name: k6-test
+      file: test.js
diff --git a/docs/crd-generated.md b/docs/crd-generated.md
index cdf63134..3b490a99 100644
--- a/docs/crd-generated.md
+++ b/docs/crd-generated.md
@@ -707,6 +707,17 @@ using the podAntiAffinity rule.
Token is reserved by Grafana Cloud k6. Do not set it manually.<br/>
        </td>
        <td>false</td>
      </tr>
+      <tr>
+        <td><b>ttlSecondsAfterFinished</b></td>
+        <td>integer</td>
+        <td>
+          TTLSecondsAfterFinished, when set, specifies the TTL for Jobs created by this TestRun
+after they finish successfully or fail. Mirrors Job's TTLSecondsAfterFinished behavior.<br/>
+            <i>Format</i>: int32<br/>
+            <i>Minimum</i>: 0<br/>
+        </td>
+        <td>false</td>
+      </tr>
diff --git a/pkg/resources/jobs/initializer.go b/pkg/resources/jobs/initializer.go
index 94bb0cf9..87d4f4f2 100644
--- a/pkg/resources/jobs/initializer.go
+++ b/pkg/resources/jobs/initializer.go
@@ -134,5 +134,9 @@ func NewInitializerJob(k6 *v1alpha1.TestRun, argLine string) (*batchv1.Job, erro
},
}
- return job, nil
+ if k6.GetSpec().TTLSecondsAfterFinished != nil {
+ job.Spec.TTLSecondsAfterFinished = k6.GetSpec().TTLSecondsAfterFinished
+ }
+
+ return job, nil
}
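
The guarded assignment above is a straight pass-through: when spec.ttlSecondsAfterFinished is unset, the Job keeps the default behavior of retaining finished Jobs (or the TestRun's cleanup, when configured, removes them). The same pattern is repeated in runner.go and starter.go below; a minimal sketch of it as a shared helper — hypothetical, not part of this diff, assuming the module path github.com/grafana/k6-operator — would be:

    package jobs

    import (
        batchv1 "k8s.io/api/batch/v1"

        "github.com/grafana/k6-operator/api/v1alpha1"
    )

    // applyTTL copies a TestRun-level ttlSecondsAfterFinished onto a Job spec when it is set.
    // Illustration only; the diff inlines this assignment in each job builder instead.
    func applyTTL(k6 *v1alpha1.TestRun, job *batchv1.Job) {
        if ttl := k6.GetSpec().TTLSecondsAfterFinished; ttl != nil {
            job.Spec.TTLSecondsAfterFinished = ttl
        }
    }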
diff --git a/pkg/resources/jobs/initializer_test.go b/pkg/resources/jobs/initializer_test.go
index 028c1ed8..9641618b 100644
--- a/pkg/resources/jobs/initializer_test.go
+++ b/pkg/resources/jobs/initializer_test.go
@@ -131,3 +131,33 @@ func TestNewInitializerJob(t *testing.T) {
t.Error(diff)
}
}
+
+func TestInitializerJob_TTLSecondsAfterFinished(t *testing.T) {
+ ttl := int32(600)
+
+ k6 := &v1alpha1.TestRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: v1alpha1.TestRunSpec{
+ Script: v1alpha1.K6Script{
+ ConfigMap: v1alpha1.K6Configmap{
+ Name: "test",
+ File: "test.js",
+ },
+ },
+ TTLSecondsAfterFinished: &ttl,
+ Initializer: &v1alpha1.Pod{},
+ },
+ }
+
+ job, err := NewInitializerJob(k6, "")
+ if err != nil {
+ t.Fatalf("NewInitializerJob errored: %v", err)
+ }
+
+ if job.Spec.TTLSecondsAfterFinished == nil || *job.Spec.TTLSecondsAfterFinished != ttl {
+ t.Fatalf("expected TTLSecondsAfterFinished=%d, got %v", ttl, job.Spec.TTLSecondsAfterFinished)
+ }
+}
diff --git a/pkg/resources/jobs/runner.go b/pkg/resources/jobs/runner.go
index 810dd84e..adbff0f2 100644
--- a/pkg/resources/jobs/runner.go
+++ b/pkg/resources/jobs/runner.go
@@ -160,58 +160,62 @@ func NewRunnerJob(k6 *v1alpha1.TestRun, index int, tokenInfo *cloud.TokenInfo) (
volumeMounts := script.VolumeMount()
volumeMounts = append(volumeMounts, k6.GetSpec().Runner.VolumeMounts...)
- job := &batchv1.Job{
- ObjectMeta: metav1.ObjectMeta{
- Name: name,
- Namespace: k6.NamespacedName().Namespace,
- Labels: runnerLabels,
- Annotations: runnerAnnotations,
- },
- Spec: batchv1.JobSpec{
- BackoffLimit: &zero32,
- Template: corev1.PodTemplateSpec{
- ObjectMeta: metav1.ObjectMeta{
- Labels: runnerLabels,
- Annotations: runnerAnnotations,
- },
- Spec: corev1.PodSpec{
- AutomountServiceAccountToken: &automountServiceAccountToken,
- ServiceAccountName: serviceAccountName,
- Hostname: name,
- RestartPolicy: corev1.RestartPolicyNever,
- Affinity: k6.GetSpec().Runner.Affinity,
- NodeSelector: k6.GetSpec().Runner.NodeSelector,
- Tolerations: k6.GetSpec().Runner.Tolerations,
- TopologySpreadConstraints: k6.GetSpec().Runner.TopologySpreadConstraints,
- SecurityContext: &k6.GetSpec().Runner.SecurityContext,
- ImagePullSecrets: k6.GetSpec().Runner.ImagePullSecrets,
- InitContainers: getInitContainers(&k6.GetSpec().Runner, script),
- Containers: []corev1.Container{{
- Image: image,
- ImagePullPolicy: k6.GetSpec().Runner.ImagePullPolicy,
- Name: "k6",
- Command: command,
- Env: env,
- Resources: k6.GetSpec().Runner.Resources,
- VolumeMounts: volumeMounts,
- Ports: ports,
- EnvFrom: k6.GetSpec().Runner.EnvFrom,
- LivenessProbe: generateProbe(k6.GetSpec().Runner.LivenessProbe),
- ReadinessProbe: generateProbe(k6.GetSpec().Runner.ReadinessProbe),
- SecurityContext: &k6.GetSpec().Runner.ContainerSecurityContext,
- }},
- TerminationGracePeriodSeconds: &zero,
- Volumes: volumes,
- },
- },
- },
- }
-
- if k6.GetSpec().Separate {
- job.Spec.Template.Spec.Affinity = newAntiAffinity()
- }
-
- return job, nil
+ job := &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: k6.NamespacedName().Namespace,
+ Labels: runnerLabels,
+ Annotations: runnerAnnotations,
+ },
+ Spec: batchv1.JobSpec{
+ BackoffLimit: &zero32,
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: runnerLabels,
+ Annotations: runnerAnnotations,
+ },
+ Spec: corev1.PodSpec{
+ AutomountServiceAccountToken: &automountServiceAccountToken,
+ ServiceAccountName: serviceAccountName,
+ Hostname: name,
+ RestartPolicy: corev1.RestartPolicyNever,
+ Affinity: k6.GetSpec().Runner.Affinity,
+ NodeSelector: k6.GetSpec().Runner.NodeSelector,
+ Tolerations: k6.GetSpec().Runner.Tolerations,
+ TopologySpreadConstraints: k6.GetSpec().Runner.TopologySpreadConstraints,
+ SecurityContext: &k6.GetSpec().Runner.SecurityContext,
+ ImagePullSecrets: k6.GetSpec().Runner.ImagePullSecrets,
+ InitContainers: getInitContainers(&k6.GetSpec().Runner, script),
+ Containers: []corev1.Container{{
+ Image: image,
+ ImagePullPolicy: k6.GetSpec().Runner.ImagePullPolicy,
+ Name: "k6",
+ Command: command,
+ Env: env,
+ Resources: k6.GetSpec().Runner.Resources,
+ VolumeMounts: volumeMounts,
+ Ports: ports,
+ EnvFrom: k6.GetSpec().Runner.EnvFrom,
+ LivenessProbe: generateProbe(k6.GetSpec().Runner.LivenessProbe),
+ ReadinessProbe: generateProbe(k6.GetSpec().Runner.ReadinessProbe),
+ SecurityContext: &k6.GetSpec().Runner.ContainerSecurityContext,
+ }},
+ TerminationGracePeriodSeconds: &zero,
+ Volumes: volumes,
+ },
+ },
+ },
+ }
+
+ if k6.GetSpec().Separate {
+ job.Spec.Template.Spec.Affinity = newAntiAffinity()
+ }
+
+ if k6.GetSpec().TTLSecondsAfterFinished != nil {
+ job.Spec.TTLSecondsAfterFinished = k6.GetSpec().TTLSecondsAfterFinished
+ }
+
+ return job, nil
}
func NewRunnerService(k6 *v1alpha1.TestRun, index int) (*corev1.Service, error) {
diff --git a/pkg/resources/jobs/runner_test.go b/pkg/resources/jobs/runner_test.go
index e5d7c175..04771f28 100644
--- a/pkg/resources/jobs/runner_test.go
+++ b/pkg/resources/jobs/runner_test.go
@@ -219,6 +219,37 @@ func TestNewAntiAffinity(t *testing.T) {
}
}
+func TestRunnerJob_TTLSecondsAfterFinished(t *testing.T) {
+ ttl := int32(300)
+
+ k6 := &v1alpha1.TestRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: v1alpha1.TestRunSpec{
+ Script: v1alpha1.K6Script{
+ ConfigMap: v1alpha1.K6Configmap{
+ Name: "test",
+ File: "test.js",
+ },
+ },
+ Runner: v1alpha1.Pod{},
+ TTLSecondsAfterFinished: &ttl,
+ Parallelism: 1,
+ },
+ }
+
+ job, err := NewRunnerJob(k6, 1, nil)
+ if err != nil {
+ t.Fatalf("NewRunnerJob errored: %v", err)
+ }
+
+ if job.Spec.TTLSecondsAfterFinished == nil || *job.Spec.TTLSecondsAfterFinished != ttl {
+ t.Fatalf("expected TTLSecondsAfterFinished=%d, got %v", ttl, job.Spec.TTLSecondsAfterFinished)
+ }
+}
+
func TestNewRunnerService(t *testing.T) {
expectedOutcome := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/resources/jobs/starter.go b/pkg/resources/jobs/starter.go
index 73ed75fa..aa19b80b 100644
--- a/pkg/resources/jobs/starter.go
+++ b/pkg/resources/jobs/starter.go
@@ -62,42 +62,48 @@ func NewStarterJob(k6 *v1alpha1.TestRun, hostname []string) *batchv1.Job {
resourceRequirements = k6.GetSpec().Starter.Resources
}
- return &batchv1.Job{
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("%s-starter", k6.NamespacedName().Name),
- Namespace: k6.NamespacedName().Namespace,
- Labels: starterLabels,
- Annotations: starterAnnotations,
- },
- Spec: batchv1.JobSpec{
- Template: corev1.PodTemplateSpec{
- ObjectMeta: metav1.ObjectMeta{
- Labels: starterLabels,
- Annotations: starterAnnotations,
- },
- Spec: corev1.PodSpec{
- AutomountServiceAccountToken: &automountServiceAccountToken,
- ServiceAccountName: serviceAccountName,
- Affinity: k6.GetSpec().Starter.Affinity,
- NodeSelector: k6.GetSpec().Starter.NodeSelector,
- Tolerations: k6.GetSpec().Starter.Tolerations,
- TopologySpreadConstraints: k6.GetSpec().Starter.TopologySpreadConstraints,
- RestartPolicy: corev1.RestartPolicyNever,
- SecurityContext: &k6.GetSpec().Starter.SecurityContext,
- ImagePullSecrets: k6.GetSpec().Starter.ImagePullSecrets,
- Containers: []corev1.Container{
- containers.NewStartContainer(
- hostname,
- starterImage,
- k6.GetSpec().Starter.ImagePullPolicy,
- command,
- env,
- k6.GetSpec().Starter.ContainerSecurityContext,
- resourceRequirements,
- ),
- },
- },
- },
- },
- }
+ job := &batchv1.Job{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fmt.Sprintf("%s-starter", k6.NamespacedName().Name),
+ Namespace: k6.NamespacedName().Namespace,
+ Labels: starterLabels,
+ Annotations: starterAnnotations,
+ },
+ Spec: batchv1.JobSpec{
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: starterLabels,
+ Annotations: starterAnnotations,
+ },
+ Spec: corev1.PodSpec{
+ AutomountServiceAccountToken: &automountServiceAccountToken,
+ ServiceAccountName: serviceAccountName,
+ Affinity: k6.GetSpec().Starter.Affinity,
+ NodeSelector: k6.GetSpec().Starter.NodeSelector,
+ Tolerations: k6.GetSpec().Starter.Tolerations,
+ TopologySpreadConstraints: k6.GetSpec().Starter.TopologySpreadConstraints,
+ RestartPolicy: corev1.RestartPolicyNever,
+ SecurityContext: &k6.GetSpec().Starter.SecurityContext,
+ ImagePullSecrets: k6.GetSpec().Starter.ImagePullSecrets,
+ Containers: []corev1.Container{
+ containers.NewStartContainer(
+ hostname,
+ starterImage,
+ k6.GetSpec().Starter.ImagePullPolicy,
+ command,
+ env,
+ k6.GetSpec().Starter.ContainerSecurityContext,
+ resourceRequirements,
+ ),
+ },
+ },
+ },
+ },
+ }
+
+ if k6.GetSpec().TTLSecondsAfterFinished != nil {
+ job.Spec.TTLSecondsAfterFinished = k6.GetSpec().TTLSecondsAfterFinished
+ }
+
+ return job
}
diff --git a/pkg/resources/jobs/starter_test.go b/pkg/resources/jobs/starter_test.go
index fc11fc15..b4923517 100644
--- a/pkg/resources/jobs/starter_test.go
+++ b/pkg/resources/jobs/starter_test.go
@@ -278,3 +278,32 @@ func TestNewStarterJobCustomResources(t *testing.T) {
t.Errorf("custom resources not applied: %v", diff)
}
}
+
+func TestStarterJob_TTLSecondsAfterFinished(t *testing.T) {
+ ttl := int32(120)
+
+ k6 := &v1alpha1.TestRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: v1alpha1.TestRunSpec{
+ Script: v1alpha1.K6Script{
+ ConfigMap: v1alpha1.K6Configmap{
+ Name: "test",
+ File: "test.js",
+ },
+ },
+ Starter: v1alpha1.Pod{
+ Image: "image",
+ },
+ TTLSecondsAfterFinished: &ttl,
+ },
+ }
+
+ job := NewStarterJob(k6, []string{"runner-0"})
+
+ if job.Spec.TTLSecondsAfterFinished == nil || *job.Spec.TTLSecondsAfterFinished != ttl {
+ t.Fatalf("expected TTLSecondsAfterFinished=%d, got %v", ttl, job.Spec.TTLSecondsAfterFinished)
+ }
+}
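
The three new tests all cover the case where ttlSecondsAfterFinished is set. The default path (field left nil, so no TTL is stamped on the Job) follows the same shape; a hypothetical companion sketch, reusing the imports already present in starter_test.go and not part of this diff:

    func TestStarterJob_NoTTLByDefault(t *testing.T) {
        k6 := &v1alpha1.TestRun{
            ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "test"},
            Spec: v1alpha1.TestRunSpec{
                Script: v1alpha1.K6Script{
                    ConfigMap: v1alpha1.K6Configmap{Name: "test", File: "test.js"},
                },
                Starter: v1alpha1.Pod{Image: "image"},
            },
        }

        job := NewStarterJob(k6, []string{"runner-0"})

        // With spec.ttlSecondsAfterFinished unset, NewStarterJob must leave the Job's TTL nil
        // so finished Jobs are retained as before this change.
        if job.Spec.TTLSecondsAfterFinished != nil {
            t.Fatalf("expected nil TTLSecondsAfterFinished, got %d", *job.Spec.TTLSecondsAfterFinished)
        }
    }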