@@ -66,6 +66,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo
		Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)

		Expect(input.E2EConfig.GetIntervals(specName, "wait-deployment-available")).ToNot(BeNil())
+		Expect(input.E2EConfig.GetIntervals(specName, "wait-machine-deleted")).ToNot(BeNil())

		// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
		namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
@@ -109,7 +110,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo

		By("Scale the machinedeployment down to zero. If we didn't have the NodeDrainTimeout duration, the node drain process would block this operator.")
		// Because all the machines of a machinedeployment can be deleted at the same time, so we only prepare the interval for 1 replica.
-		nodeDrainTimeoutMachineDeploymentInterval := convertDurationToInterval(machineDeployments[0].Spec.Template.Spec.NodeDrainTimeout, 1)
+		nodeDrainTimeoutMachineDeploymentInterval := getDrainAndDeleteInterval(input.E2EConfig.GetIntervals(specName, "wait-machine-deleted"), machineDeployments[0].Spec.Template.Spec.NodeDrainTimeout, 1)
		for _, md := range machineDeployments {
			framework.ScaleAndWaitMachineDeployment(ctx, framework.ScaleAndWaitMachineDeploymentInput{
				ClusterProxy: input.BootstrapClusterProxy,
@@ -131,7 +132,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo

		By("Scale down the controlplane of the workload cluster and make sure that nodes running workload can be deleted even the draining process is blocked.")
		// When we scale down the KCP, controlplane machines are by default deleted one by one, so it requires more time.
-		nodeDrainTimeoutKCPInterval := convertDurationToInterval(controlplane.Spec.MachineTemplate.NodeDrainTimeout, controlPlaneReplicas)
+		nodeDrainTimeoutKCPInterval := getDrainAndDeleteInterval(input.E2EConfig.GetIntervals(specName, "wait-machine-deleted"), controlplane.Spec.MachineTemplate.NodeDrainTimeout, controlPlaneReplicas)
		framework.ScaleAndWaitControlPlane(ctx, framework.ScaleAndWaitControlPlaneInput{
			ClusterProxy: input.BootstrapClusterProxy,
			Cluster:      cluster,
@@ -149,10 +150,11 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo
	})
}

-func convertDurationToInterval(duration *metav1.Duration, replicas int) []interface{} {
-	pollingInterval := time.Second * 10
-	// After the drain timeout is over, the cluster still needs more time to completely delete the machine, that why we need an extra 2-minute amount of time.
-	intervalDuration := (duration.Duration + time.Minute*2) * time.Duration(replicas)
-	res := []interface{}{intervalDuration.String(), pollingInterval.String()}
+func getDrainAndDeleteInterval(deleteInterval []interface{}, drainTimeout *metav1.Duration, replicas int) []interface{} {
+	deleteTimeout, err := time.ParseDuration(deleteInterval[0].(string))
+	Expect(err).NotTo(HaveOccurred())
+	// We add the drain timeout to the specified delete timeout per replica.
+	intervalDuration := (drainTimeout.Duration + deleteTimeout) * time.Duration(replicas)
+	res := []interface{}{intervalDuration.String(), deleteInterval[1]}
	return res
}
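For reference, a minimal standalone sketch of the interval arithmetic introduced by getDrainAndDeleteInterval, assuming an illustrative "wait-machine-deleted" interval of ["10m", "10s"] and a 5-minute drain timeout (both values are assumptions for the example, not part of this change):

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Illustrative values only: a "wait-machine-deleted" interval of ["10m", "10s"]
	// (timeout, polling interval) and a 5-minute NodeDrainTimeout.
	deleteInterval := []interface{}{"10m", "10s"}
	drainTimeout := &metav1.Duration{Duration: 5 * time.Minute}
	replicas := 3

	// Same arithmetic as getDrainAndDeleteInterval: (drain timeout + delete timeout) per replica.
	deleteTimeout, err := time.ParseDuration(deleteInterval[0].(string))
	if err != nil {
		panic(err)
	}
	intervalDuration := (drainTimeout.Duration + deleteTimeout) * time.Duration(replicas)

	fmt.Println([]interface{}{intervalDuration.String(), deleteInterval[1]})
	// Output: [45m0s 10s]
}

The polling interval from the configured "wait-machine-deleted" entry is passed through unchanged; only the overall timeout scales with the number of replicas.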