
Commit 09d6d2d

🌱 v1beta2 conditions: add function for setting the Paused condition (#11284)
* v1beta2 conditions: add function for setting the Paused condition
* machine: set v1beta2 Paused condition
* machineset: set v1beta2 Paused condition
* machinedeployment: set v1beta2 Paused condition
* kubeadmcontrolplane: set v1beta2 Paused condition
* cluster: set v1beta2 Paused condition
* kubeadmconfig: set v1beta2 Paused condition
* machinepool: set v1beta2 Paused condition
* machinehealthcheck: set v1beta2 Paused condition
* clusterresourceset: set v1beta2 Paused condition
* dockercluster: set v1beta2 Paused condition
* dockermachine: set v1beta2 Paused condition
* inmemorycluster: set v1beta2 Paused condition
* inmemorymachine: set v1beta2 Paused condition
* bootstrap/kubeadm/internal/builder/builders.go: use consts
* util/predicates/cluster_predicates.go: fix comment
* review: cleanup predicates
* paused: remove option stuff
* machinedeployment: preserve v1beta2 status
* drop additional paused check
* Add ClusterPausedTransitionsOrInfrastructureReady predicate
* review fixes
* clusterclass: set v1beta2 Paused condition
* fix for clusterclass / clusterctl
* v1beta2conditions: export HasSameState
* paused improvements
* predicates fixup
* fix test
* review fixes
1 parent: ccc430f
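The pattern this commit applies across its controllers is mechanical: the early `annotations.IsPaused` exit in each Reconcile is replaced by a call to `paused.EnsurePausedCondition`, which also records a Paused v1beta2 condition on the object. The call shape below is taken verbatim from the diffs; the expanded branches and their comments are an interpretive sketch, assuming the helper itself patches the condition onto the object's status:

    // At the top of Reconcile, after the object and its owning Cluster are fetched.
    isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, obj)
    if err != nil {
        // Patching the Paused condition failed; surface the error and retry.
        return ctrl.Result{}, err
    }
    if isPaused {
        // The object or its Cluster is paused; the condition now records that,
        // and reconciliation stops here.
        return ctrl.Result{}, nil
    }
    if conditionChanged {
        // Assumption: the condition was just flipped (e.g. back to "not paused");
        // return and let the resulting update event re-queue a full reconcile.
        return ctrl.Result{}, nil
    }
    // ...normal reconciliation continues...

The collapsed form used throughout the diff, `err != nil || isPaused || conditionChanged`, returns `err` in all three cases; `err` is nil in the latter two, so the behavior is identical.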

File tree

29 files changed: +510 -150 lines


api/v1beta1/machine_types.go (-6)

@@ -273,12 +273,6 @@ const (
 	MachineDeletingV1Beta2Condition = DeletingV1Beta2Condition
 )
 
-// Machine's Paused condition and corresponding reasons that will be used in v1Beta2 API version.
-const (
-	// MachinePausedV1Beta2Condition is true if the Machine or the Cluster it belongs to are paused.
-	MachinePausedV1Beta2Condition = PausedV1Beta2Condition
-)
-
 // ANCHOR: MachineSpec
 
 // MachineSpec defines the desired state of Machine.

bootstrap/kubeadm/internal/builder/builders.go (+10)

@@ -21,6 +21,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
 )
 
@@ -80,6 +81,15 @@ func (k *KubeadmConfigBuilder) Build() *bootstrapv1.KubeadmConfig {
 			Namespace: k.namespace,
 			Name:      k.name,
 		},
+		Status: bootstrapv1.KubeadmConfigStatus{
+			V1Beta2: &bootstrapv1.KubeadmConfigV1Beta2Status{
+				Conditions: []metav1.Condition{{
+					Type:   clusterv1.PausedV1Beta2Condition,
+					Status: metav1.ConditionFalse,
+					Reason: clusterv1.NotPausedV1Beta2Reason,
+				}},
+			},
+		},
 	}
 	if k.initConfig != nil {
 		config.Spec.InitConfiguration = k.initConfig
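Seeding the builder's output with a `Paused: False` condition keeps test fixtures consistent with what the reconciler now writes on its first pass, so object comparisons in tests do not drift. A hedged example of checking the seeded condition; the builder constructor name here is hypothetical, while `meta.FindStatusCondition` is the standard helper from k8s.io/apimachinery/pkg/api/meta:

    // Hypothetical builder call; only Build() and the seeded status appear in this diff.
    config := builder.KubeadmConfig(metav1.NamespaceDefault, "cfg").Build()

    cond := meta.FindStatusCondition(config.Status.V1Beta2.Conditions, clusterv1.PausedV1Beta2Condition)
    // Expected from the seeded status above:
    //   cond.Status == metav1.ConditionFalse
    //   cond.Reason == clusterv1.NotPausedV1Beta2Reason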

bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go (+5 -6)

@@ -55,10 +55,10 @@ import (
 	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/util/taints"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	clog "sigs.k8s.io/cluster-api/util/log"
 	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/cluster-api/util/paused"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	"sigs.k8s.io/cluster-api/util/secret"
 )
@@ -117,7 +117,7 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager
 		Watches(
 			&clusterv1.Machine{},
 			handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc),
-		).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue))
+		).WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue))
 
 	if feature.Gates.Enabled(feature.MachinePool) {
 		b = b.Watches(
@@ -131,7 +131,7 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager
 			handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmConfigs),
 			builder.WithPredicates(
 				predicates.All(mgr.GetScheme(), predicateLog,
-					predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetScheme(), predicateLog),
+					predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), predicateLog),
 					predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue),
 				),
 			),
@@ -199,9 +199,8 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		return ctrl.Result{}, err
 	}
 
-	if annotations.IsPaused(cluster, config) {
-		log.Info("Reconciliation is paused for this object")
-		return ctrl.Result{}, nil
+	if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, config); err != nil || isPaused || conditionChanged {
+		return ctrl.Result{}, err
 	}
 
 	scope := &Scope{
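The watch-side changes follow from the same requirement: events from paused Clusters can no longer be dropped wholesale, because every reconciler must run at least once on each pause and unpause to update the Paused condition. Hence `ClusterUnpausedAndInfrastructureReady` becomes `ClusterPausedTransitionsOrInfrastructureReady`, and the blanket `ResourceNotPausedAndHasFilterLabel` event filter loses its "not paused" half. A minimal sketch of what a pause-transition predicate can look like, assuming it keys on `spec.paused` flips (the real implementation lives in util/predicates and is not shown in this diff):

    // Illustrative only; not the real util/predicates code.
    func pausedTransition(e event.UpdateEvent) bool {
        oldCluster, okOld := e.ObjectOld.(*clusterv1.Cluster)
        newCluster, okNew := e.ObjectNew.(*clusterv1.Cluster)
        if !okOld || !okNew {
            return false
        }
        // Pass the event through whenever the paused state flips in either
        // direction, so reconcilers can update the Paused condition.
        return oldCluster.Spec.Paused != newCluster.Spec.Paused
    }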

bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go (+3 -1)

@@ -107,7 +107,9 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfKubeadmConfigIsReady(t *testing.T) {
 		machine,
 		config,
 	}
-	myclient := fake.NewClientBuilder().WithObjects(objects...).Build()
+	myclient := fake.NewClientBuilder().
+		WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).
+		WithObjects(objects...).Build()
 
 	k := &KubeadmConfigReconciler{
 		Client: myclient,
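The `WithStatusSubresource` registration is what lets the new gate work against the fake client: `paused.EnsurePausedCondition` presumably patches the condition through the status subresource, and without registering it the fake client would reject that status patch.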

cmd/clusterctl/client/cluster/topology.go (+5)

@@ -520,6 +520,11 @@ func reconcileClusterClass(ctx context.Context, apiReader client.Reader, class c
 		Client: reconcilerClient,
 	}
 
+	// The first only reconciles the paused condition.
+	if _, err := clusterClassReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: targetClusterClass}); err != nil {
+		return nil, errors.Wrap(err, "failed to dry run the ClusterClass controller to reconcile the paused condition")
+	}
+
 	if _, err := clusterClassReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: targetClusterClass}); err != nil {
 		return nil, errors.Wrap(err, "failed to dry run the ClusterClass controller")
 	}
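Why two passes: with the paused gate described above, the first dry-run Reconcile returns as soon as it has set the Paused condition, so a second call is needed to exercise the actual ClusterClass reconciliation. In a live cluster the update event produced by the condition patch triggers the second pass; the dry run has to make that call explicitly.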

controlplane/kubeadm/internal/controllers/controller.go (+5 -6)

@@ -48,11 +48,11 @@ import (
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/util/ssa"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/collections"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/finalizers"
 	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/cluster-api/util/paused"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	"sigs.k8s.io/cluster-api/util/secret"
 	"sigs.k8s.io/cluster-api/util/version"
@@ -99,14 +99,14 @@ func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager
 		For(&controlplanev1.KubeadmControlPlane{}).
 		Owns(&clusterv1.Machine{}).
 		WithOptions(options).
-		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
+		WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
 		Watches(
 			&clusterv1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmControlPlane),
 			builder.WithPredicates(
 				predicates.All(mgr.GetScheme(), predicateLog,
 					predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue),
-					predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetScheme(), predicateLog),
+					predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), predicateLog),
 				),
 			),
 		).
@@ -172,9 +172,8 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	log = log.WithValues("Cluster", klog.KObj(cluster))
 	ctx = ctrl.LoggerInto(ctx, log)
 
-	if annotations.IsPaused(cluster, kcp) {
-		log.Info("Reconciliation is paused for this object")
-		return ctrl.Result{}, nil
+	if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, kcp); err != nil || isPaused || conditionChanged {
+		return ctrl.Result{}, err
 	}
 
 	// Initialize the patch helper.

controlplane/kubeadm/internal/controllers/controller_test.go (+7)

@@ -440,6 +440,13 @@ func TestReconcileClusterNoEndpoints(t *testing.T) {
 				},
 			},
 		},
+		Status: controlplanev1.KubeadmControlPlaneStatus{
+			V1Beta2: &controlplanev1.KubeadmControlPlaneV1Beta2Status{Conditions: []metav1.Condition{{
+				Type:   clusterv1.PausedV1Beta2Condition,
+				Status: metav1.ConditionFalse,
+				Reason: clusterv1.NotPausedV1Beta2Reason,
+			}}},
+		},
 	}
 	webhook := &controlplanev1webhooks.KubeadmControlPlane{}
 	g.Expect(webhook.Default(ctx, kcp)).To(Succeed())

exp/addons/internal/controllers/clusterresourceset_controller.go (+7 -4)

@@ -48,6 +48,7 @@ import (
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/finalizers"
 	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/cluster-api/util/paused"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
 
@@ -82,9 +83,7 @@ func (r *ClusterResourceSetReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager
 			handler.EnqueueRequestsFromMapFunc(
 				resourceToClusterResourceSetFunc[client.Object](r.Client),
 			),
-			builder.WithPredicates(
-				resourcepredicates.TypedResourceCreateOrUpdate[client.Object](predicateLog),
-			),
+			builder.WithPredicates(resourcepredicates.TypedResourceCreateOrUpdate[client.Object](predicateLog)),
 		).
 		WatchesRawSource(source.Kind(
 			partialSecretCache,
@@ -100,7 +99,7 @@ func (r *ClusterResourceSetReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager
 			resourcepredicates.TypedResourceCreateOrUpdate[*metav1.PartialObjectMetadata](predicateLog),
 		)).
 		WithOptions(options).
-		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
+		WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
 		Complete(r)
 	if err != nil {
 		return errors.Wrap(err, "failed setting up with a controller manager")
@@ -129,6 +128,10 @@ func (r *ClusterResourceSetReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		return ctrl.Result{}, err
 	}
 
+	if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, nil, clusterResourceSet); err != nil || isPaused || conditionChanged {
+		return ctrl.Result{}, err
+	}
+
 	// Initialize the patch helper.
 	patchHelper, err := patch.NewHelper(clusterResourceSet, r.Client)
 	if err != nil {
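Note the nil Cluster argument: a ClusterResourceSet can target many Clusters, so only its own pause marker is relevant. A sketch of the assumed semantics (not the helper's actual code):

    // Assumption: with cluster == nil, only the object's own pause annotation
    // counts; with a non-nil cluster, cluster.Spec.Paused is OR-ed in, matching
    // what the replaced annotations.IsPaused(cluster, obj) call checked.
    isPaused := annotations.HasPaused(clusterResourceSet)
    if cluster != nil {
        isPaused = isPaused || cluster.Spec.Paused
    }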

exp/internal/controllers/machinepool_controller.go (+5 -7)

@@ -44,10 +44,10 @@ import (
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/internal/util/ssa"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/finalizers"
 	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/cluster-api/util/paused"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
 
@@ -109,14 +109,14 @@ func (r *MachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager
 	c, err := ctrl.NewControllerManagedBy(mgr).
 		For(&expv1.MachinePool{}).
 		WithOptions(options).
-		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
+		WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
 		Watches(
 			&clusterv1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(clusterToMachinePools),
 			// TODO: should this wait for Cluster.Status.InfrastructureReady similar to Infra Machine resources?
 			builder.WithPredicates(
 				predicates.All(mgr.GetScheme(), predicateLog,
-					predicates.ClusterUnpaused(mgr.GetScheme(), predicateLog),
+					predicates.ClusterPausedTransitions(mgr.GetScheme(), predicateLog),
 					predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue),
 				),
 			),
@@ -168,10 +168,8 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 			mp.Spec.ClusterName, mp.Name, mp.Namespace)
 	}
 
-	// Return early if the object or Cluster is paused.
-	if annotations.IsPaused(cluster, mp) {
-		log.Info("Reconciliation is paused for this object")
-		return ctrl.Result{}, nil
+	if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, mp); err != nil || isPaused || conditionChanged {
+		return ctrl.Result{}, err
 	}
 
 	// Initialize the patch helper.

exp/internal/controllers/machinepool_controller_test.go (+60 -7)

@@ -158,6 +158,13 @@ func TestMachinePoolOwnerReference(t *testing.T) {
 			Replicas:    ptr.To[int32](1),
 			ClusterName: "invalid",
 		},
+		Status: expv1.MachinePoolStatus{
+			V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
+				Type:   clusterv1.PausedV1Beta2Condition,
+				Status: metav1.ConditionFalse,
+				Reason: clusterv1.NotPausedV1Beta2Reason,
+			}}},
+		},
 	}
 
 	machinePoolValidCluster := &expv1.MachinePool{
@@ -176,6 +183,13 @@ func TestMachinePoolOwnerReference(t *testing.T) {
 			},
 			ClusterName: "test-cluster",
 		},
+		Status: expv1.MachinePoolStatus{
+			V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
+				Type:   clusterv1.PausedV1Beta2Condition,
+				Status: metav1.ConditionFalse,
+				Reason: clusterv1.NotPausedV1Beta2Reason,
+			}}},
+		},
 	}
 
 	machinePoolValidMachinePool := &expv1.MachinePool{
@@ -197,6 +211,13 @@ func TestMachinePoolOwnerReference(t *testing.T) {
 			},
 			ClusterName: "test-cluster",
 		},
+		Status: expv1.MachinePoolStatus{
+			V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
+				Type:   clusterv1.PausedV1Beta2Condition,
+				Status: metav1.ConditionFalse,
+				Reason: clusterv1.NotPausedV1Beta2Reason,
+			}}},
+		},
 	}
 
 	testCases := []struct {
@@ -345,6 +366,11 @@ func TestReconcileMachinePoolRequest(t *testing.T) {
 					{Name: "test"},
 				},
 				ObservedGeneration: 1,
+				V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
+					Type:   clusterv1.PausedV1Beta2Condition,
+					Status: metav1.ConditionFalse,
+					Reason: clusterv1.NotPausedV1Beta2Reason,
+				}}},
 			},
 		},
 		expected: expected{
@@ -390,6 +416,11 @@ func TestReconcileMachinePoolRequest(t *testing.T) {
 						Name: "test-node",
 					},
 				},
+				V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
+					Type:   clusterv1.PausedV1Beta2Condition,
+					Status: metav1.ConditionFalse,
+					Reason: clusterv1.NotPausedV1Beta2Reason,
+				}}},
 			},
 		},
 		nodes: []corev1.Node{
@@ -447,6 +478,11 @@ func TestReconcileMachinePoolRequest(t *testing.T) {
 						Name: "test-node",
 					},
 				},
+				V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
+					Type:   clusterv1.PausedV1Beta2Condition,
+					Status: metav1.ConditionFalse,
+					Reason: clusterv1.NotPausedV1Beta2Reason,
+				}}},
 			},
 		},
 		nodes: []corev1.Node{
@@ -504,6 +540,11 @@ func TestReconcileMachinePoolRequest(t *testing.T) {
 						Name: "test-node",
 					},
 				},
+				V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
+					Type:   clusterv1.PausedV1Beta2Condition,
+					Status: metav1.ConditionFalse,
+					Reason: clusterv1.NotPausedV1Beta2Reason,
+				}}},
 			},
 		},
 		nodes: []corev1.Node{
@@ -820,6 +861,13 @@ func TestRemoveMachinePoolFinalizerAfterDeleteReconcile(t *testing.T) {
 				},
 			},
 		},
+		Status: expv1.MachinePoolStatus{
+			V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
+				Type:   clusterv1.PausedV1Beta2Condition,
+				Status: metav1.ConditionFalse,
+				Reason: clusterv1.NotPausedV1Beta2Reason,
+			}}},
+		},
 	}
 	key := client.ObjectKey{Namespace: m.Namespace, Name: m.Name}
 	clientFake := fake.NewClientBuilder().WithObjects(testCluster, m).WithStatusSubresource(&expv1.MachinePool{}).Build()
@@ -912,6 +960,13 @@ func TestMachinePoolConditions(t *testing.T) {
 				},
 			},
 		},
+		Status: expv1.MachinePoolStatus{
+			V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
+				Type:   clusterv1.PausedV1Beta2Condition,
+				Status: metav1.ConditionFalse,
+				Reason: clusterv1.NotPausedV1Beta2Reason,
+			}}},
+		},
 	}
 
 	nodeList := corev1.NodeList{
@@ -951,14 +1006,12 @@ func TestMachinePoolConditions(t *testing.T) {
 			infrastructureReady: true,
 			beforeFunc: func(_, _ *unstructured.Unstructured, mp *expv1.MachinePool, _ *corev1.NodeList) {
 				mp.Spec.ProviderIDList = []string{"azure://westus2/id-node-4", "aws://us-east-1/id-node-1"}
-				mp.Status = expv1.MachinePoolStatus{
-					NodeRefs: []corev1.ObjectReference{
-						{Name: "node-1"},
-						{Name: "azure-node-4"},
-					},
-					Replicas:      2,
-					ReadyReplicas: 2,
+				mp.Status.NodeRefs = []corev1.ObjectReference{
+					{Name: "node-1"},
+					{Name: "azure-node-4"},
 				}
+				mp.Status.Replicas = 2
+				mp.Status.ReadyReplicas = 2
 			},
 			conditionAssertFunc: func(t *testing.T, getter conditions.Getter) {
 				t.Helper()
