From 01ee7ebe2dd4deb89756036bd1d341a6c21762c7 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Thu, 17 Apr 2025 15:40:25 +0200 Subject: [PATCH 01/20] Accept usage of clusterv1.ResizedV1Beta1Condition (it is for v1beta1 conditions management only) --- internal/controllers/machineset/machineset_controller.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index b75ea4dae268..388f68f10735 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -1263,7 +1263,6 @@ func (r *Reconciler) reconcileStatus(ctx context.Context, s *scope) error { // Make sure last resize operation is marked as completed. // NOTE: we are checking the number of machines ready so we report resize completed only when the machines // are actually provisioned (vs reporting completed immediately after the last machine object is created). This convention is also used by KCP. - // TODO (v1beta2) Use new replica counters if newStatus.Deprecated.V1Beta1.ReadyReplicas == newStatus.Replicas { if v1beta1conditions.IsFalse(ms, clusterv1.ResizedV1Beta1Condition) { log.Info("All the replicas are ready", "replicas", newStatus.Deprecated.V1Beta1.ReadyReplicas) From faa9cc26c02f2fc27f437fade222fb8562ab776d Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Thu, 17 Apr 2025 16:48:20 +0200 Subject: [PATCH 02/20] Stop using clusterv1.ClusterClassVariablesReconciledV1Beta1Condition in controllers --- .../topology/cluster/cluster_controller.go | 9 ++- .../cluster/cluster_controller_test.go | 44 +++++++++--- internal/webhooks/cluster.go | 7 +- internal/webhooks/cluster_test.go | 70 +++++++++++++++---- util/test/builder/builders.go | 9 +-- util/test/builder/zz_generated.deepcopy.go | 2 +- 6 files changed, 104 insertions(+), 37 deletions(-) diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go index ef427728d058..360709c6414b 100644 --- a/internal/controllers/topology/cluster/cluster_controller.go +++ b/internal/controllers/topology/cluster/cluster_controller.go @@ -62,7 +62,7 @@ import ( "sigs.k8s.io/cluster-api/internal/webhooks" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -350,10 +350,9 @@ func (r *Reconciler) reconcile(ctx context.Context, s *scope.Scope) (ctrl.Result // is not up to date. // Note: This doesn't require requeue as a change to ClusterClass observedGeneration will cause an additional reconcile // in the Cluster. 
- // TODO (v1beta2): test for v1beta2 conditions - if !v1beta1conditions.Has(clusterClass, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) || - v1beta1conditions.IsFalse(clusterClass, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) { - return ctrl.Result{}, errors.Errorf("ClusterClass is not successfully reconciled: status of %s condition on ClusterClass must be \"True\"", clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) + if !conditions.Has(clusterClass, clusterv1.ClusterClassVariablesReadyCondition) || + conditions.IsFalse(clusterClass, clusterv1.ClusterClassVariablesReadyCondition) { + return ctrl.Result{}, errors.Errorf("ClusterClass is not successfully reconciled: status of %s condition on ClusterClass must be \"True\"", clusterv1.ClusterClassVariablesReadyCondition) } if clusterClass.GetGeneration() != clusterClass.Status.ObservedGeneration { return ctrl.Result{}, errors.Errorf("ClusterClass is not successfully reconciled: ClusterClass.status.observedGeneration must be %d, but is %d", clusterClass.GetGeneration(), clusterClass.Status.ObservedGeneration) diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go index 5b230bc77c1f..3cd7d99a1df5 100644 --- a/internal/controllers/topology/cluster/cluster_controller_test.go +++ b/internal/controllers/topology/cluster/cluster_controller_test.go @@ -1314,7 +1314,11 @@ func TestReconciler_DefaultCluster(t *testing.T) { }, }, }). - WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledV1Beta1Condition)). + WithConditions(metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }). Build(), initialCluster: clusterBuilder.DeepCopy(). Build(), @@ -1342,7 +1346,11 @@ func TestReconciler_DefaultCluster(t *testing.T) { }, }, }). - WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledV1Beta1Condition)). + WithConditions(metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }). Build(), initialCluster: clusterBuilder.DeepCopy().WithTopology(topologyBase.DeepCopy().WithVariables( clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-west"`)}}). @@ -1398,7 +1406,11 @@ func TestReconciler_DefaultCluster(t *testing.T) { }, }, }...). - WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledV1Beta1Condition)). + WithConditions(metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }). Build(), initialCluster: clusterBuilder.DeepCopy(). WithTopology(topologyBase.DeepCopy(). @@ -1505,7 +1517,11 @@ func TestReconciler_ValidateCluster(t *testing.T) { }, }, }). - WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledV1Beta1Condition)). + WithConditions(metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }). Build(), cluster: clusterBuilder.DeepCopy(). Build(), @@ -1523,7 +1539,11 @@ func TestReconciler_ValidateCluster(t *testing.T) { }, }, }). - WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledV1Beta1Condition)). 
+ WithConditions(metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }). Build(), cluster: clusterBuilder. Build(), @@ -1541,12 +1561,16 @@ func TestReconciler_ValidateCluster(t *testing.T) { }, }, }). - WithConditions(*v1beta1conditions.FalseCondition(clusterv1.ClusterClassVariablesReconciledV1Beta1Condition, clusterv1.VariableDiscoveryFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "error message")). + WithConditions(metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.ClusterClassVariablesReadyVariableDiscoveryFailedReason, + }). Build(), cluster: clusterBuilder. Build(), wantValidationErr: true, - wantValidationErrMessage: "ClusterClass is not successfully reconciled: status of VariablesReconciled condition on ClusterClass must be \"True\"", + wantValidationErrMessage: "ClusterClass is not successfully reconciled: status of VariablesReady condition on ClusterClass must be \"True\"", }, { name: "Cluster invalid as it defines an MDTopology without a corresponding MDClass", @@ -1560,7 +1584,11 @@ func TestReconciler_ValidateCluster(t *testing.T) { }, }, }). - WithConditions(*v1beta1conditions.TrueCondition(clusterv1.ClusterClassVariablesReconciledV1Beta1Condition)). + WithConditions(metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }). Build(), cluster: clusterBuilder.WithTopology( builder.ClusterTopology().DeepCopy(). diff --git a/internal/webhooks/cluster.go b/internal/webhooks/cluster.go index 03db7f54e2eb..10faa26c10ae 100644 --- a/internal/webhooks/cluster.go +++ b/internal/webhooks/cluster.go @@ -44,7 +44,7 @@ import ( "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/topology/check" "sigs.k8s.io/cluster-api/internal/topology/variables" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/version" ) @@ -1000,9 +1000,8 @@ func clusterClassIsReconciled(clusterClass *clusterv1.ClusterClass) error { return errClusterClassNotReconciled } // If the clusterClass does not have ClusterClassVariablesReconciled==True, the ClusterClass has not been successfully reconciled. 
- // TODO (v1beta2): test for v1beta2 conditions - if !v1beta1conditions.Has(clusterClass, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) || - v1beta1conditions.IsFalse(clusterClass, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) { + if !conditions.Has(clusterClass, clusterv1.ClusterClassVariablesReadyCondition) || + conditions.IsFalse(clusterClass, clusterv1.ClusterClassVariablesReadyCondition) { return errClusterClassNotReconciled } return nil diff --git a/internal/webhooks/cluster_test.go b/internal/webhooks/cluster_test.go index eb0ca95c9fda..67d7006f6ad4 100644 --- a/internal/webhooks/cluster_test.go +++ b/internal/webhooks/cluster_test.go @@ -43,7 +43,7 @@ import ( expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta2" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/webhooks/util" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -100,7 +100,11 @@ func TestClusterTopologyDefaultNamespaces(t *testing.T) { WithControlPlaneInfrastructureMachineTemplate(&unstructured.Unstructured{}). WithWorkerMachineDeploymentClasses(*builder.MachineDeploymentClass("aa").Build()). Build() - v1beta1conditions.MarkTrue(clusterClass, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) + conditions.Set(clusterClass, metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }) // Sets up the fakeClient for the test case. This is required because the test uses a Managed Topology. fakeClient := fake.NewClientBuilder(). WithObjects(clusterClass). @@ -1304,7 +1308,11 @@ func TestClusterDefaultAndValidateVariables(t *testing.T) { Build() // Mark this condition to true so the webhook sees the ClusterClass as up to date. - v1beta1conditions.MarkTrue(tt.clusterClass, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) + conditions.Set(tt.clusterClass, metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }) fakeClient := fake.NewClientBuilder(). WithObjects(tt.clusterClass). WithScheme(fakeScheme). @@ -1373,7 +1381,11 @@ func TestClusterDefaultTopologyVersion(t *testing.T) { Build() clusterClass := builder.ClusterClass("fooboo", "foo").Build() - v1beta1conditions.MarkTrue(clusterClass, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) + conditions.Set(clusterClass, metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }) // Sets up the fakeClient for the test case. This is required because the test uses a Managed Topology. fakeClient := fake.NewClientBuilder(). WithObjects(clusterClass). @@ -2186,7 +2198,11 @@ func TestClusterTopologyValidation(t *testing.T) { Build() // Mark this condition to true so the webhook sees the ClusterClass as up to date. - v1beta1conditions.MarkTrue(class, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) + conditions.Set(class, metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }) // Sets up the fakeClient for the test case. fakeClient := fake.NewClientBuilder(). WithObjects(class). 
@@ -2508,7 +2524,11 @@ func TestClusterTopologyValidationWithClient(t *testing.T) { t.Run(tt.name, func(*testing.T) { // Mark this condition to true so the webhook sees the ClusterClass as up to date. if tt.classReconciled { - v1beta1conditions.MarkTrue(tt.class, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) + conditions.Set(tt.class, metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }) } // Sets up the fakeClient for the test case. fakeClient := fake.NewClientBuilder(). @@ -3040,8 +3060,16 @@ func TestClusterTopologyValidationForTopologyClassChange(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(*testing.T) { // Mark this condition to true so the webhook sees the ClusterClass as up to date. - v1beta1conditions.MarkTrue(tt.firstClass, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) - v1beta1conditions.MarkTrue(tt.secondClass, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) + conditions.Set(tt.firstClass, metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }) + conditions.Set(tt.secondClass, metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }) // Sets up the fakeClient for the test case. fakeClient := fake.NewClientBuilder(). @@ -3165,7 +3193,11 @@ func TestMovingBetweenManagedAndUnmanaged(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(*testing.T) { // Mark this condition to true so the webhook sees the ClusterClass as up to date. - v1beta1conditions.MarkTrue(tt.clusterClass, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) + conditions.Set(tt.clusterClass, metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }) // Sets up the fakeClient for the test case. fakeClient := fake.NewClientBuilder(). WithObjects(tt.clusterClass, tt.cluster). @@ -3216,7 +3248,11 @@ func TestClusterClassPollingErrors(t *testing.T) { ccFullyReconciled := baseClusterClass.DeepCopy().Build() ccFullyReconciled.Generation = 1 ccFullyReconciled.Status.ObservedGeneration = 1 - v1beta1conditions.MarkTrue(ccFullyReconciled, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) + conditions.Set(ccFullyReconciled, metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }) // secondFullyReconciled is a second ClusterClass with a matching generation and observed generation, and VariablesReconciled=True. secondFullyReconciled := ccFullyReconciled.DeepCopy() @@ -3226,11 +3262,19 @@ func TestClusterClassPollingErrors(t *testing.T) { ccGenerationMismatch := baseClusterClass.DeepCopy().Build() ccGenerationMismatch.Generation = 999 ccGenerationMismatch.Status.ObservedGeneration = 1 - v1beta1conditions.MarkTrue(ccGenerationMismatch, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition) + conditions.Set(ccGenerationMismatch, metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterClassVariablesReadyReason, + }) // ccVariablesReconciledFalse with VariablesReconciled=False. 
- ccVariablesReconciledFalse := baseClusterClass.DeepCopy().Build() - v1beta1conditions.MarkFalse(ccGenerationMismatch, clusterv1.ClusterClassVariablesReconciledV1Beta1Condition, "", clusterv1.ConditionSeverityError, "") + ccVariablesReconciledFalse := baseClusterClass.Build().DeepCopy() + conditions.Set(ccVariablesReconciledFalse, metav1.Condition{ + Type: clusterv1.ClusterClassVariablesReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.ClusterClassVariablesReadyVariableDiscoveryFailedReason, + }) tests := []struct { name string diff --git a/util/test/builder/builders.go b/util/test/builder/builders.go index 282ff27e1198..5ec7a0ac95c9 100644 --- a/util/test/builder/builders.go +++ b/util/test/builder/builders.go @@ -353,7 +353,7 @@ type ClusterClassBuilder struct { variables []clusterv1.ClusterClassVariable statusVariables []clusterv1.ClusterClassStatusVariable patches []clusterv1.ClusterClassPatch - conditions clusterv1.Conditions + conditions []metav1.Condition } // ClusterClass returns a ClusterClassBuilder with the given name and namespace. @@ -446,7 +446,7 @@ func (c *ClusterClassBuilder) WithStatusVariables(vars ...clusterv1.ClusterClass } // WithConditions adds the conditions to the ClusterClassBuilder. -func (c *ClusterClassBuilder) WithConditions(conditions ...clusterv1.Condition) *ClusterClassBuilder { +func (c *ClusterClassBuilder) WithConditions(conditions ...metav1.Condition) *ClusterClassBuilder { c.conditions = conditions return c } @@ -494,11 +494,8 @@ func (c *ClusterClassBuilder) Build() *clusterv1.ClusterClass { Variables: c.statusVariables, }, } - // TODO (v1beta2) Use new conditions if c.conditions != nil { - obj.Status.Deprecated = &clusterv1.ClusterClassDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterClassV1Beta1DeprecatedStatus{Conditions: c.conditions}, - } + obj.Status.Conditions = c.conditions } if c.infrastructureClusterTemplate != nil { obj.Spec.Infrastructure = clusterv1.LocalObjectTemplate{ diff --git a/util/test/builder/zz_generated.deepcopy.go b/util/test/builder/zz_generated.deepcopy.go index 309d839c2d0a..5e1c10758242 100644 --- a/util/test/builder/zz_generated.deepcopy.go +++ b/util/test/builder/zz_generated.deepcopy.go @@ -205,7 +205,7 @@ func (in *ClusterClassBuilder) DeepCopyInto(out *ClusterClassBuilder) { } if in.conditions != nil { in, out := &in.conditions, &out.conditions - *out = make(v1beta2.Conditions, len(*in)) + *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } From 3a05bf5658d69441e1df083cd93394ea23ce06c1 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Thu, 17 Apr 2025 17:56:56 +0200 Subject: [PATCH 03/20] Stop using clusterv1.TopologyReconciledV1Beta1Condition in controllers --- .../cluster/cluster_controller_test.go | 4 ++-- .../topology/cluster/conditions_test.go | 20 +++++++++---------- test/e2e/cluster_upgrade_runtimesdk.go | 11 +++++----- util/conditions/getter.go | 16 +++++++++++++++ 4 files changed, 34 insertions(+), 17 deletions(-) diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go index 3cd7d99a1df5..dc9220cb1f9c 100644 --- a/internal/controllers/topology/cluster/cluster_controller_test.go +++ b/internal/controllers/topology/cluster/cluster_controller_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/hooks" fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" - v1beta1conditions 
"sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/test/builder" @@ -930,7 +930,7 @@ func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error) } func assertClusterTopologyReconciledCondition(cluster *clusterv1.Cluster) error { - if !v1beta1conditions.Has(cluster, clusterv1.TopologyReconciledV1Beta1Condition) { + if !conditions.Has(cluster, clusterv1.ClusterTopologyReconciledCondition) { return fmt.Errorf("cluster should have the TopologyReconciled condition set") } return nil diff --git a/internal/controllers/topology/cluster/conditions_test.go b/internal/controllers/topology/cluster/conditions_test.go index e9aac176994d..9be71c05092e 100644 --- a/internal/controllers/topology/cluster/conditions_test.go +++ b/internal/controllers/topology/cluster/conditions_test.go @@ -978,17 +978,17 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { } else { g.Expect(err).ToNot(HaveOccurred()) - actualCondition := v1beta1conditions.Get(tt.cluster, clusterv1.TopologyReconciledV1Beta1Condition) - g.Expect(actualCondition).ToNot(BeNil()) - g.Expect(actualCondition.Status).To(BeEquivalentTo(tt.wantConditionStatus)) - g.Expect(actualCondition.Reason).To(BeEquivalentTo(tt.wantConditionReason)) - g.Expect(actualCondition.Message).To(BeEquivalentTo(tt.wantConditionMessage)) + actualV1Beta1Condition := v1beta1conditions.Get(tt.cluster, clusterv1.TopologyReconciledV1Beta1Condition) + g.Expect(actualV1Beta1Condition).ToNot(BeNil()) + g.Expect(actualV1Beta1Condition.Status).To(BeEquivalentTo(tt.wantConditionStatus)) + g.Expect(actualV1Beta1Condition.Reason).To(BeEquivalentTo(tt.wantConditionReason)) + g.Expect(actualV1Beta1Condition.Message).To(BeEquivalentTo(tt.wantConditionMessage)) - actualV1Beta2Condition := conditions.Get(tt.cluster, clusterv1.ClusterTopologyReconciledCondition) - g.Expect(actualV1Beta2Condition).ToNot(BeNil()) - g.Expect(actualV1Beta2Condition.Status).To(BeEquivalentTo(tt.wantV1Beta2ConditionStatus)) - g.Expect(actualV1Beta2Condition.Reason).To(BeEquivalentTo(tt.wantV1Beta2ConditionReason)) - g.Expect(actualV1Beta2Condition.Message).To(BeEquivalentTo(tt.wantV1Beta2ConditionMessage)) + actualCondition := conditions.Get(tt.cluster, clusterv1.ClusterTopologyReconciledCondition) + g.Expect(actualCondition).ToNot(BeNil()) + g.Expect(actualCondition.Status).To(BeEquivalentTo(tt.wantV1Beta2ConditionStatus)) + g.Expect(actualCondition.Reason).To(BeEquivalentTo(tt.wantV1Beta2ConditionReason)) + g.Expect(actualCondition.Message).To(BeEquivalentTo(tt.wantV1Beta2ConditionMessage)) } }) } diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go index 72654cfce6a9..9291a2ac4e89 100644 --- a/test/e2e/cluster_upgrade_runtimesdk.go +++ b/test/e2e/cluster_upgrade_runtimesdk.go @@ -42,6 +42,7 @@ import ( "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) @@ -596,10 +597,10 @@ func beforeClusterUpgradeAnnotationIsBlocking(ctx context.Context, c client.Clie cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ Name: clusterRef.Name, Namespace: clusterRef.Namespace, Getter: c}) - if v1beta1conditions.GetReason(cluster, 
clusterv1.TopologyReconciledV1Beta1Condition) != clusterv1.TopologyReconciledHookBlockingV1Beta1Reason { + if conditions.GetReason(cluster, clusterv1.ClusterTopologyReconciledCondition) != clusterv1.TopologyReconciledHookBlockingV1Beta1Reason { return fmt.Errorf("hook %s (via annotation) should lead to LifecycleHookBlocking reason", hookName) } - if !strings.Contains(v1beta1conditions.GetMessage(cluster, clusterv1.TopologyReconciledV1Beta1Condition), expectedBlockingMessage) { + if !strings.Contains(conditions.GetMessage(cluster, clusterv1.ClusterTopologyReconciledCondition), expectedBlockingMessage) { return fmt.Errorf("hook %[1]s (via annotation) should show hook %[1]s is blocking as message with: %[2]s", hookName, expectedBlockingMessage) } @@ -634,7 +635,7 @@ func beforeClusterUpgradeAnnotationIsBlocking(ctx context.Context, c client.Clie cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ Name: clusterRef.Name, Namespace: clusterRef.Namespace, Getter: c}) - if strings.Contains(v1beta1conditions.GetMessage(cluster, clusterv1.TopologyReconciledV1Beta1Condition), expectedBlockingMessage) { + if strings.Contains(conditions.GetMessage(cluster, clusterv1.ClusterTopologyReconciledCondition), expectedBlockingMessage) { return fmt.Errorf("hook %s (via annotation %s) should not be blocking anymore with message: %s", hookName, annotation, expectedBlockingMessage) } @@ -752,8 +753,8 @@ func runtimeHookTestHandler(ctx context.Context, c client.Client, cluster types. // clusterConditionShowsHookBlocking checks if the TopologyReconciled condition message contains both the hook name and hookFailedMessage. func clusterConditionShowsHookBlocking(cluster *clusterv1.Cluster, hookName string) bool { - return v1beta1conditions.GetReason(cluster, clusterv1.TopologyReconciledV1Beta1Condition) == clusterv1.TopologyReconciledHookBlockingV1Beta1Reason && - strings.Contains(v1beta1conditions.GetMessage(cluster, clusterv1.TopologyReconciledV1Beta1Condition), hookName) + return conditions.GetReason(cluster, clusterv1.ClusterTopologyReconciledCondition) == clusterv1.ClusterTopologyReconciledHookBlockingReason && + strings.Contains(conditions.GetMessage(cluster, clusterv1.ClusterTopologyReconciledCondition), hookName) } func dumpAndDeleteCluster(ctx context.Context, proxy framework.ClusterProxy, clusterctlConfigPath, namespace, clusterName, artifactFolder string) { diff --git a/util/conditions/getter.go b/util/conditions/getter.go index 81689fb94783..c278ff59bbb6 100644 --- a/util/conditions/getter.go +++ b/util/conditions/getter.go @@ -86,6 +86,22 @@ func IsUnknown(from Getter, conditionType string) bool { return true } +// GetReason returns a nil safe string of Reason for the condition with the given type. +func GetReason(from Getter, conditionType string) string { + if c := Get(from, conditionType); c != nil { + return c.Reason + } + return "" +} + +// GetMessage returns a nil safe string of Message. +func GetMessage(from Getter, conditionType string) string { + if c := Get(from, conditionType); c != nil { + return c.Message + } + return "" +} + // UnstructuredGetAll returns conditions from an Unstructured object. 
// // UnstructuredGetAll supports retrieving conditions from objects at different stages of the transition from From 036ccaed9023a0e2674eba3b319ac70bb33b2199 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Thu, 17 Apr 2025 19:59:25 +0200 Subject: [PATCH 04/20] Stop using clusterv1.MachineNodeHealthyV1Beta1Condition in controllers --- .../machine/machine_controller_noderef.go | 6 +- .../machine_controller_noderef_test.go | 2 +- .../machine/machine_controller_test.go | 33 ++++--- .../machineset/machineset_delete_policy.go | 5 +- .../machineset_delete_policy_test.go | 96 +++++++------------ test/e2e/cluster_upgrade_runtimesdk.go | 5 +- test/e2e/cluster_upgrade_test.go | 4 +- test/framework/machine_helpers.go | 4 +- 8 files changed, 60 insertions(+), 95 deletions(-) diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go index 0835d41252f3..55966d210c54 100644 --- a/internal/controllers/machine/machine_controller_noderef.go +++ b/internal/controllers/machine/machine_controller_noderef.go @@ -163,7 +163,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result, } // Do the remaining node health checks, then set the node health to true if all checks pass. - status, message := summarizeNodeConditions(s.node) + status, message := summarizeNodeV1beta1Conditions(s.node) if status == corev1.ConditionFalse { v1beta1conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyV1Beta1Condition, clusterv1.NodeConditionsFailedV1Beta1Reason, clusterv1.ConditionSeverityWarning, message) return ctrl.Result{}, nil @@ -177,12 +177,12 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result, return ctrl.Result{}, nil } -// summarizeNodeConditions summarizes a Node's conditions and returns the summary of condition statuses and concatenate failed condition messages: +// summarizeNodeV1beta1Conditions summarizes a Node's conditions and returns the summary of condition statuses and concatenate failed condition messages: // if there is at least 1 semantically-negative condition, summarized status = False; // if there is at least 1 semantically-positive condition when there is 0 semantically negative condition, summarized status = True; // if all conditions are unknown, summarized status = Unknown. // (semantically true conditions: NodeMemoryPressure/NodeDiskPressure/NodePIDPressure == false or Ready == true.) 
-func summarizeNodeConditions(node *corev1.Node) (corev1.ConditionStatus, string) { +func summarizeNodeV1beta1Conditions(node *corev1.Node) (corev1.ConditionStatus, string) { semanticallyFalseStatus := 0 unknownStatus := 0 diff --git a/internal/controllers/machine/machine_controller_noderef_test.go b/internal/controllers/machine/machine_controller_noderef_test.go index 04d6d527726a..486dd4fc6724 100644 --- a/internal/controllers/machine/machine_controller_noderef_test.go +++ b/internal/controllers/machine/machine_controller_noderef_test.go @@ -694,7 +694,7 @@ func TestSummarizeNodeConditions(t *testing.T) { Conditions: test.conditions, }, } - status, _ := summarizeNodeConditions(node) + status, _ := summarizeNodeV1beta1Conditions(node) g.Expect(status).To(Equal(test.status)) }) } diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index ef164675515a..c915ca47dd3f 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -980,7 +980,7 @@ func TestReconcileRequest(t *testing.T) { } } -func TestMachineConditions(t *testing.T) { +func TestMachineV1Beta1Conditions(t *testing.T) { infraConfig := func(provisioned bool) *unstructured.Unstructured { return &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -1090,7 +1090,7 @@ func TestMachineConditions(t *testing.T) { bootstrapDataSecretCreated bool beforeFunc func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) additionalObjects []client.Object - conditionsToAssert []*clusterv1.Condition + v1beta1ConditionsToAssert []*clusterv1.Condition wantErr bool }{ { @@ -1102,7 +1102,7 @@ func TestMachineConditions(t *testing.T) { v1beta1conditions.MarkTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta1Condition) v1beta1conditions.MarkTrue(m, clusterv1.MachineOwnerRemediatedV1Beta1Condition) }, - conditionsToAssert: []*clusterv1.Condition{ + v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.TrueCondition(clusterv1.InfrastructureReadyV1Beta1Condition), v1beta1conditions.TrueCondition(clusterv1.BootstrapReadyV1Beta1Condition), v1beta1conditions.TrueCondition(clusterv1.MachineOwnerRemediatedV1Beta1Condition), @@ -1124,7 +1124,7 @@ func TestMachineConditions(t *testing.T) { }, }) }, - conditionsToAssert: []*clusterv1.Condition{ + v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.InfrastructureReadyV1Beta1Condition, "Custom reason", clusterv1.ConditionSeverityInfo, ""), }, }, @@ -1132,7 +1132,7 @@ func TestMachineConditions(t *testing.T) { name: "infra condition consumes the fallback reason", infraProvisioned: false, bootstrapDataSecretCreated: true, - conditionsToAssert: []*clusterv1.Condition{ + v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.InfrastructureReadyV1Beta1Condition, clusterv1.WaitingForInfrastructureFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), v1beta1conditions.FalseCondition(clusterv1.ReadyV1Beta1Condition, clusterv1.WaitingForInfrastructureFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), }, @@ -1151,7 +1151,7 @@ func TestMachineConditions(t *testing.T) { }, }) }, - conditionsToAssert: []*clusterv1.Condition{ + v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.BootstrapReadyV1Beta1Condition, "Custom reason", clusterv1.ConditionSeverityInfo, ""), }, }, @@ -1159,7 +1159,7 @@ func 
TestMachineConditions(t *testing.T) { name: "bootstrap condition consumes the fallback reason", infraProvisioned: true, bootstrapDataSecretCreated: false, - conditionsToAssert: []*clusterv1.Condition{ + v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.BootstrapReadyV1Beta1Condition, clusterv1.WaitingForDataSecretFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), v1beta1conditions.FalseCondition(clusterv1.ReadyV1Beta1Condition, clusterv1.WaitingForDataSecretFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), }, @@ -1170,7 +1170,7 @@ func TestMachineConditions(t *testing.T) { name: "ready condition summary consumes reason from the infra condition", infraProvisioned: false, bootstrapDataSecretCreated: false, - conditionsToAssert: []*clusterv1.Condition{ + v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.ReadyV1Beta1Condition, clusterv1.WaitingForInfrastructureFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), }, }, @@ -1181,7 +1181,7 @@ func TestMachineConditions(t *testing.T) { beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { v1beta1conditions.MarkFalse(m, clusterv1.MachineOwnerRemediatedV1Beta1Condition, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "MHC failed") }, - conditionsToAssert: []*clusterv1.Condition{ + v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.ReadyV1Beta1Condition, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "MHC failed"), }, }, @@ -1192,7 +1192,7 @@ func TestMachineConditions(t *testing.T) { beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { v1beta1conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededV1Beta1Condition, clusterv1.NodeNotFoundV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") }, - conditionsToAssert: []*clusterv1.Condition{ + v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.ReadyV1Beta1Condition, clusterv1.NodeNotFoundV1Beta1Reason, clusterv1.ConditionSeverityWarning, ""), }, }, @@ -1209,7 +1209,7 @@ func TestMachineConditions(t *testing.T) { Spec: corev1.NodeSpec{ProviderID: "test://id-1"}, }}, wantErr: true, - conditionsToAssert: []*clusterv1.Condition{ + v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.TrueCondition(clusterv1.InfrastructureReadyV1Beta1Condition), v1beta1conditions.TrueCondition(clusterv1.BootstrapReadyV1Beta1Condition), v1beta1conditions.TrueCondition(clusterv1.ReadyV1Beta1Condition), @@ -1223,7 +1223,7 @@ func TestMachineConditions(t *testing.T) { beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { v1beta1conditions.MarkFalse(m, clusterv1.DrainingSucceededV1Beta1Condition, clusterv1.DrainingFailedV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") }, - conditionsToAssert: []*clusterv1.Condition{ + v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.ReadyV1Beta1Condition, clusterv1.DrainingFailedV1Beta1Reason, clusterv1.ConditionSeverityWarning, ""), }, }, @@ -1275,7 +1275,7 @@ func TestMachineConditions(t *testing.T) { m = &clusterv1.Machine{} g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(&machine), m)).ToNot(HaveOccurred()) - assertConditions(t, m, tt.conditionsToAssert...) + assertV1Beta1Conditions(t, m, tt.v1beta1ConditionsToAssert...) 
}) } } @@ -3510,19 +3510,18 @@ func addConditionsToExternal(u *unstructured.Unstructured, newConditions cluster } // asserts the conditions set on the Getter object. -// TODO: replace this with util.condition.MatchConditions (or a new matcher in controller runtime komega). -func assertConditions(t *testing.T, from v1beta1conditions.Getter, conditions ...*clusterv1.Condition) { +func assertV1Beta1Conditions(t *testing.T, from v1beta1conditions.Getter, conditions ...*clusterv1.Condition) { t.Helper() for _, condition := range conditions { - assertCondition(t, from, condition) + assertV1Beta1Condition(t, from, condition) } } // asserts whether a condition of type is set on the Getter object // when the condition is true, asserting the reason/severity/message // for the condition are avoided. -func assertCondition(t *testing.T, from v1beta1conditions.Getter, condition *clusterv1.Condition) { +func assertV1Beta1Condition(t *testing.T, from v1beta1conditions.Getter, condition *clusterv1.Condition) { t.Helper() g := NewWithT(t) diff --git a/internal/controllers/machineset/machineset_delete_policy.go b/internal/controllers/machineset/machineset_delete_policy.go index 445d06ef2702..8a23f70989ea 100644 --- a/internal/controllers/machineset/machineset_delete_policy.go +++ b/internal/controllers/machineset/machineset_delete_policy.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) @@ -147,9 +148,7 @@ func isMachineHealthy(machine *clusterv1.Machine) bool { return false } // Note: for the sake of prioritization, we are not making any assumption about Health when ConditionUnknown. 
- // TODO (v1beta2): test for v1beta2 conditions - nodeHealthyCondition := v1beta1conditions.Get(machine, clusterv1.MachineNodeHealthyV1Beta1Condition) - if nodeHealthyCondition != nil && nodeHealthyCondition.Status == corev1.ConditionFalse { + if conditions.IsFalse(machine, clusterv1.MachineNodeReadyCondition) { return false } healthCheckCondition := v1beta1conditions.Get(machine, clusterv1.MachineHealthCheckSucceededV1Beta1Condition) diff --git a/internal/controllers/machineset/machineset_delete_policy_test.go b/internal/controllers/machineset/machineset_delete_policy_test.go index 1286a0cc3ab6..fc7e08eaa447 100644 --- a/internal/controllers/machineset/machineset_delete_policy_test.go +++ b/internal/controllers/machineset/machineset_delete_policy_test.go @@ -56,14 +56,10 @@ func TestMachineToDelete(t *testing.T) { nodeHealthyConditionFalseMachine := &clusterv1.Machine{ Status: clusterv1.MachineStatus{ NodeRef: nodeRef, - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.MachineNodeHealthyV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionFalse, }, }, }, @@ -71,14 +67,10 @@ func TestMachineToDelete(t *testing.T) { nodeHealthyConditionUnknownMachine := &clusterv1.Machine{ Status: clusterv1.MachineStatus{ NodeRef: nodeRef, - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.MachineNodeHealthyV1Beta1Condition, - Status: corev1.ConditionUnknown, - }, - }, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionUnknown, }, }, }, @@ -349,14 +341,10 @@ func TestMachineNewestDelete(t *testing.T) { ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ NodeRef: nodeRef, - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.MachineNodeHealthyV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionFalse, }, }, }, @@ -365,14 +353,10 @@ func TestMachineNewestDelete(t *testing.T) { ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ NodeRef: nodeRef, - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.MachineNodeHealthyV1Beta1Condition, - Status: corev1.ConditionUnknown, - }, - }, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionUnknown, }, }, }, @@ -532,14 +516,10 @@ func TestMachineOldestDelete(t *testing.T) { ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ NodeRef: nodeRef, - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.MachineNodeHealthyV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, + Conditions: []metav1.Condition{ + { + Type: 
clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionFalse, }, }, }, @@ -548,14 +528,10 @@ func TestMachineOldestDelete(t *testing.T) { ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ NodeRef: nodeRef, - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.MachineNodeHealthyV1Beta1Condition, - Status: corev1.ConditionUnknown, - }, - }, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionUnknown, }, }, }, @@ -809,14 +785,10 @@ func TestIsMachineHealthy(t *testing.T) { machine: &clusterv1.Machine{ Status: clusterv1.MachineStatus{ NodeRef: nodeRef, - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.MachineNodeHealthyV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionFalse, }, }, }, @@ -828,14 +800,10 @@ func TestIsMachineHealthy(t *testing.T) { machine: &clusterv1.Machine{ Status: clusterv1.MachineStatus{ NodeRef: nodeRef, - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.MachineNodeHealthyV1Beta1Condition, - Status: corev1.ConditionUnknown, - }, - }, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionUnknown, }, }, }, diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go index 9291a2ac4e89..1cf2db709d00 100644 --- a/test/e2e/cluster_upgrade_runtimesdk.go +++ b/test/e2e/cluster_upgrade_runtimesdk.go @@ -249,9 +249,8 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl for i := range machineList.Items { machine := &machineList.Items[i] - // TODO (v1beta2): test for v1beta2 conditions - if !v1beta1conditions.IsTrue(machine, clusterv1.MachineNodeHealthyV1Beta1Condition) { - return errors.Errorf("machine %q does not have %q condition set to true", machine.GetName(), clusterv1.MachineNodeHealthyV1Beta1Condition) + if !conditions.IsTrue(machine, clusterv1.MachineNodeReadyCondition) { + return errors.Errorf("machine %q does not have %q condition set to true", machine.GetName(), clusterv1.MachineNodeReadyCondition) } } diff --git a/test/e2e/cluster_upgrade_test.go b/test/e2e/cluster_upgrade_test.go index d3d22529c9ff..27c5e6769310 100644 --- a/test/e2e/cluster_upgrade_test.go +++ b/test/e2e/cluster_upgrade_test.go @@ -33,7 +33,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/test/e2e/internal/log" "sigs.k8s.io/cluster-api/test/framework" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ) @@ -116,7 +116,7 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with a HA var upgradedAndHealthy int64 deletingMachines := []clusterv1.Machine{} for _, m := range machines { - if *m.Spec.Version == cluster.Spec.Topology.Version && v1beta1conditions.IsTrue(&m, clusterv1.MachineNodeHealthyV1Beta1Condition) { + if *m.Spec.Version == cluster.Spec.Topology.Version && conditions.IsTrue(&m, clusterv1.MachineNodeReadyCondition) { 
upgradedAndHealthy++ } if !m.DeletionTimestamp.IsZero() { diff --git a/test/framework/machine_helpers.go b/test/framework/machine_helpers.go index 37907f752717..98cbdcc5e1b7 100644 --- a/test/framework/machine_helpers.go +++ b/test/framework/machine_helpers.go @@ -29,7 +29,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" . "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" "sigs.k8s.io/cluster-api/test/framework/internal/log" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ) @@ -170,7 +170,7 @@ func WaitForControlPlaneMachinesToBeUpgraded(ctx context.Context, input WaitForC upgraded := 0 for _, machine := range machines { m := machine - if *m.Spec.Version == input.KubernetesUpgradeVersion && v1beta1conditions.IsTrue(&m, clusterv1.MachineNodeHealthyV1Beta1Condition) { + if *m.Spec.Version == input.KubernetesUpgradeVersion && conditions.IsTrue(&m, clusterv1.MachineNodeReadyCondition) { upgraded++ } } From 75a62c4758f234028f005403544cf1df9964da16 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Fri, 18 Apr 2025 10:58:48 +0200 Subject: [PATCH 05/20] Stop using clusterv1.PreTerminateDeleteHookSucceededV1Beta1Condition in controllers --- .../internal/controllers/controller.go | 5 ++--- .../internal/controllers/controller_test.go | 20 +++++++++---------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 410e031489aa..12b96dd35414 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -1174,9 +1174,8 @@ func (r *KubeadmControlPlaneReconciler) reconcilePreTerminateHook(ctx context.Co } // Return early because the Machine controller is not yet waiting for the pre-terminate hook. 
- // TODO (v1beta2): test for v1beta2 conditions - c := v1beta1conditions.Get(deletingMachine, clusterv1.PreTerminateDeleteHookSucceededV1Beta1Condition) - if c == nil || c.Status != corev1.ConditionFalse || c.Reason != clusterv1.WaitingExternalHookV1Beta1Reason { + c := conditions.Get(deletingMachine, clusterv1.MachineDeletingCondition) + if c == nil || c.Status != metav1.ConditionTrue || c.Reason != clusterv1.MachineDeletingWaitingForPreTerminateHookReason { return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil } diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index e2b1b8ddfc26..29cb2fdad346 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -2943,7 +2943,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { }, }, { - name: "Requeue, if the deleting Machine has no PreTerminateDeleteHookSucceeded condition", + name: "Requeue, if the deleting Machine has no Deleting condition", controlPlane: &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ @@ -2961,7 +2961,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { }, }, { - name: "Requeue, if the deleting Machine has PreTerminateDeleteHookSucceeded condition true", + name: "Requeue, if the deleting Machine has Deleting condition false", controlPlane: &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ @@ -2971,7 +2971,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { Machines: collections.Machines{ deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() - v1beta1conditions.MarkTrue(m, clusterv1.PreTerminateDeleteHookSucceededV1Beta1Condition) + conditions.Set(m, metav1.Condition{Type: clusterv1.MachineDeletingCondition, Status: metav1.ConditionFalse}) return m }(), }, @@ -2983,7 +2983,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { }, }, { - name: "Requeue, if the deleting Machine has PreTerminateDeleteHookSucceeded condition false but not waiting for hook", + name: "Requeue, if the deleting Machine has Deleting condition true but not waiting for hook", controlPlane: &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ @@ -2993,7 +2993,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { Machines: collections.Machines{ deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() - v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededV1Beta1Condition, "some-other-reason", clusterv1.ConditionSeverityInfo, "some message") + conditions.Set(m, metav1.Condition{Type: clusterv1.MachineDeletingCondition, Status: metav1.ConditionTrue, Reason: "Some other reason"}) return m }(), }, @@ -3017,7 +3017,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { machine.Name: machine, // Leadership will be forwarded to this Machine. 
deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() - v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededV1Beta1Condition, clusterv1.WaitingExternalHookV1Beta1Reason, clusterv1.ConditionSeverityInfo, "some message") + conditions.Set(m, metav1.Condition{Type: clusterv1.MachineDeletingCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeletingWaitingForPreTerminateHookReason}) return m }(), }, @@ -3043,13 +3043,13 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() m.DeletionTimestamp.Time = m.DeletionTimestamp.Add(-1 * time.Duration(1) * time.Second) // Make sure this (the oldest) Machine is selected to run the pre-terminate hook. - v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededV1Beta1Condition, clusterv1.WaitingExternalHookV1Beta1Reason, clusterv1.ConditionSeverityInfo, "some message") + conditions.Set(m, metav1.Condition{Type: clusterv1.MachineDeletingCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeletingWaitingForPreTerminateHookReason}) return m }(), deletingMachineWithKCPPreTerminateHook.Name + "-2": func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() m.Name += "-2" - v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededV1Beta1Condition, clusterv1.WaitingExternalHookV1Beta1Reason, clusterv1.ConditionSeverityInfo, "some message") + conditions.Set(m, metav1.Condition{Type: clusterv1.MachineDeletingCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeletingWaitingForPreTerminateHookReason}) return m }(), }, @@ -3074,7 +3074,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { Machines: collections.Machines{ deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() - v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededV1Beta1Condition, clusterv1.WaitingExternalHookV1Beta1Reason, clusterv1.ConditionSeverityInfo, "some message") + conditions.Set(m, metav1.Condition{Type: clusterv1.MachineDeletingCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeletingWaitingForPreTerminateHookReason}) return m }(), }, @@ -3108,7 +3108,7 @@ func TestKubeadmControlPlaneReconciler_reconcilePreTerminateHook(t *testing.T) { machine.Name: machine, deletingMachineWithKCPPreTerminateHook.Name: func() *clusterv1.Machine { m := deletingMachineWithKCPPreTerminateHook.DeepCopy() - v1beta1conditions.MarkFalse(m, clusterv1.PreTerminateDeleteHookSucceededV1Beta1Condition, clusterv1.WaitingExternalHookV1Beta1Reason, clusterv1.ConditionSeverityInfo, "some message") + conditions.Set(m, metav1.Condition{Type: clusterv1.MachineDeletingCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeletingWaitingForPreTerminateHookReason}) return m }(), }, From 7f7744ddfe7980fe6fbf34b8a9a40b93df645db5 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Fri, 18 Apr 2025 11:42:36 +0200 Subject: [PATCH 06/20] Stop using clusterv1.DrainingSucceededV1Beta1Condition in controllers --- .../controllers/machine/machine_controller.go | 1 - .../machine/machine_controller_test.go | 26 ++++++------- test/e2e/node_drain.go | 37 +++++++++++-------- 3 files changed, 35 insertions(+), 29 deletions(-) diff --git 
a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 4456c45dad23..11e7625fcd3e 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -488,7 +488,6 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result } // The DrainingSucceededCondition never exists before the node is drained for the first time. - // TODO (v1beta2): test for v1beta2 conditions if v1beta1conditions.Get(m, clusterv1.DrainingSucceededV1Beta1Condition) == nil { v1beta1conditions.MarkFalse(m, clusterv1.DrainingSucceededV1Beta1Condition, clusterv1.DrainingV1Beta1Reason, clusterv1.ConditionSeverityInfo, "Draining the node before deletion") } diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index c915ca47dd3f..7e2bcbed5e45 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -1511,16 +1511,16 @@ func TestDrainNode(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) tests := []struct { - name string - nodeName string - node *corev1.Node - pods []*corev1.Pod - nodeDrainStartTime *metav1.Time - wantCondition *clusterv1.Condition - wantResult ctrl.Result - wantErr string - wantDeletingReason string - wantDeletingMessage string + name string + nodeName string + node *corev1.Node + pods []*corev1.Pod + nodeDrainStartTime *metav1.Time + wantV1Beta1Condition *clusterv1.Condition + wantResult ctrl.Result + wantErr string + wantDeletingReason string + wantDeletingMessage string }{ { name: "Node does not exist, no-op", @@ -1625,7 +1625,7 @@ func TestDrainNode(t *testing.T) { }, nodeDrainStartTime: &metav1.Time{Time: nodeDrainStartTime}, wantResult: ctrl.Result{RequeueAfter: 20 * time.Second}, - wantCondition: &clusterv1.Condition{ + wantV1Beta1Condition: &clusterv1.Condition{ Type: clusterv1.DrainingSucceededV1Beta1Condition, Status: corev1.ConditionFalse, Severity: clusterv1.ConditionSeverityInfo, @@ -1741,13 +1741,13 @@ func TestDrainNode(t *testing.T) { } gotCondition := v1beta1conditions.Get(testMachine, clusterv1.DrainingSucceededV1Beta1Condition) - if tt.wantCondition == nil { + if tt.wantV1Beta1Condition == nil { g.Expect(gotCondition).To(BeNil()) } else { g.Expect(gotCondition).ToNot(BeNil()) // Cleanup for easier comparison gotCondition.LastTransitionTime = metav1.Time{} - g.Expect(gotCondition).To(BeComparableTo(tt.wantCondition)) + g.Expect(gotCondition).To(BeComparableTo(tt.wantV1Beta1Condition)) } g.Expect(s.deletingReason).To(Equal(tt.wantDeletingReason)) diff --git a/test/e2e/node_drain.go b/test/e2e/node_drain.go index 6a5f6242fe37..6fb282482456 100644 --- a/test/e2e/node_drain.go +++ b/test/e2e/node_drain.go @@ -37,6 +37,7 @@ import ( "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) @@ -336,10 +337,10 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo ClusterName: cluster.Name, Namespace: cluster.Namespace, }) - var condition *clusterv1.Condition + var condition *metav1.Condition for _, machine := range controlPlaneMachines { - condition = v1beta1conditions.Get(&machine, clusterv1.DrainingSucceededV1Beta1Condition) - if condition != nil { + condition = conditions.Get(&machine, 
clusterv1.MachineDeletingCondition) + if condition != nil && condition.Status == metav1.ConditionTrue && condition.Reason == clusterv1.MachineDeletingDrainingNodeReason { // We only expect to find the condition on one Machine (as KCP will only try to drain one Machine at a time) drainingCPMachineKey = client.ObjectKeyFromObject(&machine) drainingCPNodeName = machine.Status.NodeRef.Name @@ -467,9 +468,10 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo drainedCPMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, drainingCPMachineKey, drainedCPMachine)).To(Succeed()) - condition := v1beta1conditions.Get(drainedCPMachine, clusterv1.DrainingSucceededV1Beta1Condition) + condition := conditions.Get(drainedCPMachine, clusterv1.MachineDeletingCondition) g.Expect(condition).ToNot(BeNil()) - g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) + g.Expect(condition.Reason).To(Equal(clusterv1.MachineDeletingDrainingNodeReason)) // The evictable Pod should be gone now. g.Expect(condition.Message).ToNot(ContainSubstring("deletionTimestamp set, but still not removed from the Node")) // The unevictable Pod should still not be evicted because of the wait-completed label. @@ -480,9 +482,10 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo drainedMDMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, drainingMDMachineKeys[md.Name], drainedMDMachine)).To(Succeed()) - condition := v1beta1conditions.Get(drainedMDMachine, clusterv1.DrainingSucceededV1Beta1Condition) + condition := conditions.Get(drainedMDMachine, clusterv1.MachineDeletingCondition) g.Expect(condition).ToNot(BeNil()) - g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) + g.Expect(condition.Reason).To(Equal(clusterv1.MachineDeletingDrainingNodeReason)) // The evictable Pod should be gone now. g.Expect(condition.Message).ToNot(ContainSubstring("deletionTimestamp set, but still not removed from the Node")) // The unevictable Pod should still not be evicted because of the wait-completed label. @@ -504,9 +507,10 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo drainedCPMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, drainingCPMachineKey, drainedCPMachine)).To(Succeed()) - condition := v1beta1conditions.Get(drainedCPMachine, clusterv1.DrainingSucceededV1Beta1Condition) + condition := conditions.Get(drainedCPMachine, clusterv1.MachineDeletingCondition) g.Expect(condition).ToNot(BeNil()) - g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) + g.Expect(condition.Reason).To(Equal(clusterv1.MachineDeletingDrainingNodeReason)) // The evictable Pod should be gone now. g.Expect(condition.Message).ToNot(ContainSubstring("deletionTimestamp set, but still not removed from the Node")) // The unevictable Pod should still not be evicted because of the PDB. 
@@ -517,9 +521,10 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo drainedMDMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, drainingMDMachineKeys[md.Name], drainedMDMachine)).To(Succeed()) - condition := v1beta1conditions.Get(drainedMDMachine, clusterv1.DrainingSucceededV1Beta1Condition) + condition := conditions.Get(drainedMDMachine, clusterv1.MachineDeletingCondition) g.Expect(condition).ToNot(BeNil()) - g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) + g.Expect(condition.Reason).To(Equal(clusterv1.MachineDeletingDrainingNodeReason)) // The evictable Pod should be gone now. g.Expect(condition.Message).ToNot(ContainSubstring("deletionTimestamp set, but still not removed from the Node")) // The unevictable Pod should still not be evicted because of the PDB. @@ -705,9 +710,10 @@ func verifyNodeDrainsBlockedAndUnblock(ctx context.Context, input verifyNodeDrai g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, input.DrainedCPMachineKey, drainedCPMachine)).To(Succeed()) // Verify condition on drained CP Machine. - condition := v1beta1conditions.Get(drainedCPMachine, clusterv1.DrainingSucceededV1Beta1Condition) + condition := conditions.Get(drainedCPMachine, clusterv1.MachineDeletingCondition) g.Expect(condition).ToNot(BeNil()) - g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) + g.Expect(condition.Reason).To(Equal(clusterv1.MachineDeletingDrainingNodeReason)) for _, messageSubstring := range input.CPConditionMessageSubstrings { var re = regexp.MustCompile(messageSubstring) match := re.MatchString(condition.Message) @@ -733,9 +739,10 @@ func verifyNodeDrainsBlockedAndUnblock(ctx context.Context, input verifyNodeDrai g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, input.DrainedMDMachineKeys[md.Name], drainedMDMachine)).To(Succeed()) // Verify condition on drained MD Machine. - condition := v1beta1conditions.Get(drainedMDMachine, clusterv1.DrainingSucceededV1Beta1Condition) + condition := conditions.Get(drainedMDMachine, clusterv1.MachineDeletingCondition) g.Expect(condition).ToNot(BeNil()) - g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) + g.Expect(condition.Reason).To(Equal(clusterv1.MachineDeletingDrainingNodeReason)) for _, messageSubstring := range input.MDConditionMessageSubstrings[md.Name] { var re = regexp.MustCompile(messageSubstring) match := re.MatchString(condition.Message) From 38333f3b1514c99a4c774009a9cf56d1edd50b4d Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Fri, 18 Apr 2025 12:43:18 +0200 Subject: [PATCH 07/20] Stop using clusterv1.VolumeDetachSucceededV1Beta1Condition in controllers --- .../controllers/machine/machine_controller.go | 1 - test/e2e/node_drain.go | 15 ++++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 11e7625fcd3e..f0185bbd613a 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -529,7 +529,6 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result } // The VolumeDetachSucceededCondition never exists before we wait for volume detachment for the first time. 
- // TODO (v1beta2): test for v1beta2 conditions if v1beta1conditions.Get(m, clusterv1.VolumeDetachSucceededV1Beta1Condition) == nil { v1beta1conditions.MarkFalse(m, clusterv1.VolumeDetachSucceededV1Beta1Condition, clusterv1.WaitingForVolumeDetachV1Beta1Reason, clusterv1.ConditionSeverityInfo, "Waiting for node volumes to be detached") } diff --git a/test/e2e/node_drain.go b/test/e2e/node_drain.go index 6fb282482456..6eddb53d720c 100644 --- a/test/e2e/node_drain.go +++ b/test/e2e/node_drain.go @@ -38,7 +38,6 @@ import ( "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) // NodeDrainTimeoutSpecInput is the input for NodeDrainTimeoutSpec. @@ -552,21 +551,23 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo waitingCPMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, drainingCPMachineKey, waitingCPMachine)).To(Succeed()) - condition := v1beta1conditions.Get(waitingCPMachine, clusterv1.VolumeDetachSucceededV1Beta1Condition) + condition := conditions.Get(waitingCPMachine, clusterv1.MachineDeletingCondition) g.Expect(condition).ToNot(BeNil()) - g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) + g.Expect(condition.Reason).To(Equal(clusterv1.MachineDeletingWaitingForVolumeDetachReason)) // Deletion still not be blocked because of the volume. - g.Expect(condition.Message).To(ContainSubstring("Waiting for node volumes to be detached")) + g.Expect(condition.Message).To(ContainSubstring("Waiting for Node volumes to be detached")) }, input.E2EConfig.GetIntervals(specName, "wait-machine-deleted")...).Should(Succeed()) for _, machineKey := range drainingMDMachineKeys { Eventually(func(g Gomega) { drainedMDMachine := &clusterv1.Machine{} g.Expect(input.BootstrapClusterProxy.GetClient().Get(ctx, machineKey, drainedMDMachine)).To(Succeed()) - condition := v1beta1conditions.Get(drainedMDMachine, clusterv1.VolumeDetachSucceededV1Beta1Condition) + condition := conditions.Get(drainedMDMachine, clusterv1.MachineDeletingCondition) g.Expect(condition).ToNot(BeNil()) - g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) // Deletion still not be blocked because of the volume. - g.Expect(condition.Message).To(ContainSubstring("Waiting for node volumes to be detached")) + g.Expect(condition.Status).To(Equal(metav1.ConditionTrue)) + g.Expect(condition.Reason).To(Equal(clusterv1.MachineDeletingWaitingForVolumeDetachReason)) // Deletion still not be blocked because of the volume. 
+ g.Expect(condition.Message).To(ContainSubstring("Waiting for Node volumes to be detached")) }, input.E2EConfig.GetIntervals(specName, "wait-machine-deleted")...).Should(Succeed()) } From 5ab00ee624e07c40a0b0dd7fe03db937d353c672 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Fri, 18 Apr 2025 14:10:08 +0200 Subject: [PATCH 08/20] Stop using clusterv1.BootstrapReadyV1Beta1Condition in controllers --- .../machine/machine_controller_test.go | 92 ++++++++++++++++++- 1 file changed, 91 insertions(+), 1 deletion(-) diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index 7e2bcbed5e45..b8d378b89778 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -41,6 +41,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/api/v1beta2/index" @@ -50,6 +51,7 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/cache" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/test/builder" @@ -1035,6 +1037,24 @@ func TestMachineV1Beta1Conditions(t *testing.T) { Name: "test-cluster", Namespace: metav1.NamespaceDefault, }, + Status: clusterv1.ClusterStatus{ + Initialization: &clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: true, + }, + Conditions: []metav1.Condition{ + { + Type: clusterv1.ClusterControlPlaneInitializedCondition, + Status: metav1.ConditionTrue, + }, + }, + Deprecated: &clusterv1.ClusterDeprecatedStatus{ + V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ + Conditions: clusterv1.Conditions{ + *v1beta1conditions.TrueCondition(clusterv1.ControlPlaneInitializedV1Beta1Condition), + }, + }, + }, + }, } machine := clusterv1.Machine{ @@ -1082,6 +1102,14 @@ func TestMachineV1Beta1Conditions(t *testing.T) { Name: "test", }, Spec: corev1.NodeSpec{ProviderID: "test://id-1"}, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + {Type: corev1.NodeMemoryPressure, Status: corev1.ConditionFalse}, + {Type: corev1.NodeDiskPressure, Status: corev1.ConditionFalse}, + {Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse}, + }, + }, } testcases := []struct { @@ -1090,6 +1118,7 @@ func TestMachineV1Beta1Conditions(t *testing.T) { bootstrapDataSecretCreated bool beforeFunc func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) additionalObjects []client.Object + conditionsToAssert []metav1.Condition v1beta1ConditionsToAssert []*clusterv1.Condition wantErr bool }{ @@ -1102,6 +1131,11 @@ func TestMachineV1Beta1Conditions(t *testing.T) { v1beta1conditions.MarkTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta1Condition) v1beta1conditions.MarkTrue(m, clusterv1.MachineOwnerRemediatedV1Beta1Condition) }, + conditionsToAssert: []metav1.Condition{ + {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineReadyReason, Message: ""}, + {Type: clusterv1.MachineBootstrapConfigReadyCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineBootstrapConfigReadyReason, Message: ""}, + {Type: 
clusterv1.MachineInfrastructureReadyCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineInfrastructureReadyReason, Message: ""}, + }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.TrueCondition(clusterv1.InfrastructureReadyV1Beta1Condition), v1beta1conditions.TrueCondition(clusterv1.BootstrapReadyV1Beta1Condition), @@ -1124,6 +1158,9 @@ func TestMachineV1Beta1Conditions(t *testing.T) { }, }) }, + conditionsToAssert: []metav1.Condition{ + {Type: clusterv1.MachineInfrastructureReadyCondition, Status: metav1.ConditionFalse, Reason: "Custom reason", Message: ""}, + }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.InfrastructureReadyV1Beta1Condition, "Custom reason", clusterv1.ConditionSeverityInfo, ""), }, @@ -1132,6 +1169,10 @@ func TestMachineV1Beta1Conditions(t *testing.T) { name: "infra condition consumes the fallback reason", infraProvisioned: false, bootstrapDataSecretCreated: true, + conditionsToAssert: []metav1.Condition{ + {Type: clusterv1.MachineInfrastructureReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureNotReadyReason, Message: "GenericInfrastructureMachine status.ready is false"}, + {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotReadyReason, Message: "* InfrastructureReady: GenericInfrastructureMachine status.ready is false"}, + }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.InfrastructureReadyV1Beta1Condition, clusterv1.WaitingForInfrastructureFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), v1beta1conditions.FalseCondition(clusterv1.ReadyV1Beta1Condition, clusterv1.WaitingForInfrastructureFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), @@ -1151,6 +1192,9 @@ func TestMachineV1Beta1Conditions(t *testing.T) { }, }) }, + conditionsToAssert: []metav1.Condition{ + {Type: clusterv1.MachineBootstrapConfigReadyCondition, Status: metav1.ConditionFalse, Reason: "Custom reason", Message: ""}, + }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.BootstrapReadyV1Beta1Condition, "Custom reason", clusterv1.ConditionSeverityInfo, ""), }, @@ -1159,6 +1203,10 @@ func TestMachineV1Beta1Conditions(t *testing.T) { name: "bootstrap condition consumes the fallback reason", infraProvisioned: true, bootstrapDataSecretCreated: false, + conditionsToAssert: []metav1.Condition{ + {Type: clusterv1.MachineBootstrapConfigReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigNotReadyReason, Message: "GenericBootstrapConfig status.ready is false"}, + {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotReadyReason, Message: "* BootstrapConfigReady: GenericBootstrapConfig status.ready is false"}, + }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.BootstrapReadyV1Beta1Condition, clusterv1.WaitingForDataSecretFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), v1beta1conditions.FalseCondition(clusterv1.ReadyV1Beta1Condition, clusterv1.WaitingForDataSecretFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), @@ -1167,10 +1215,14 @@ func TestMachineV1Beta1Conditions(t *testing.T) { // Assert summary conditions // infra condition takes precedence over bootstrap condition in generating summary { - name: "ready condition summary consumes reason from the infra condition", + name: "ready 
condition summary uses a generic reason in case of multiple issues", infraProvisioned: false, bootstrapDataSecretCreated: false, + conditionsToAssert: []metav1.Condition{ + {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotReadyReason, Message: "* BootstrapConfigReady: GenericBootstrapConfig status.ready is false\n* InfrastructureReady: GenericInfrastructureMachine status.ready is false"}, + }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ + // In v1beta1, the ready condition summary consumes the reason from the infra condition v1beta1conditions.FalseCondition(clusterv1.ReadyV1Beta1Condition, clusterv1.WaitingForInfrastructureFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), }, }, @@ -1209,6 +1261,9 @@ func TestMachineV1Beta1Conditions(t *testing.T) { Spec: corev1.NodeSpec{ProviderID: "test://id-1"}, }}, wantErr: true, + conditionsToAssert: []metav1.Condition{ + {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineReadyUnknownReason, Message: "* NodeHealthy: Please check controller logs for errors"}, + }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.TrueCondition(clusterv1.InfrastructureReadyV1Beta1Condition), v1beta1conditions.TrueCondition(clusterv1.BootstrapReadyV1Beta1Condition), @@ -1223,6 +1278,7 @@ func TestMachineV1Beta1Conditions(t *testing.T) { beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { v1beta1conditions.MarkFalse(m, clusterv1.DrainingSucceededV1Beta1Condition, clusterv1.DrainingFailedV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") }, + // conditionsToAssert: not set because clusterv1.DrainingSucceededV1Beta1Condition has been merged into the v1beta2 Deleting condition. v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.ReadyV1Beta1Condition, clusterv1.DrainingFailedV1Beta1Reason, clusterv1.ConditionSeverityWarning, ""), }, @@ -1263,6 +1319,9 @@ func TestMachineV1Beta1Conditions(t *testing.T) { Scheme: clientFake.Scheme(), PredicateLogger: ptr.To(logr.New(log.NullLogSink{})), }, + controller: &fakeController{}, + predicateLog: ptr.To(logr.New(log.NullLogSink{})), + RemoteConditionsGracePeriod: time.Since(time.Time{}), } _, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: util.ObjectKey(&machine)}) @@ -1275,11 +1334,30 @@ func TestMachineV1Beta1Conditions(t *testing.T) { m = &clusterv1.Machine{} g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(&machine), m)).ToNot(HaveOccurred()) + assertConditions(t, m, tt.conditionsToAssert...) assertV1Beta1Conditions(t, m, tt.v1beta1ConditionsToAssert...) }) } } +type fakeController struct{} + +func (f fakeController) Reconcile(_ context.Context, _ reconcile.Request) (reconcile.Result, error) { + panic("implement me") +} + +func (f fakeController) Watch(_ source.TypedSource[reconcile.Request]) error { + return nil +} + +func (f fakeController) Start(_ context.Context) error { + panic("implement me") +} + +func (f fakeController) GetLogger() logr.Logger { + panic("implement me") +} + func TestRemoveMachineFinalizerAfterDeleteReconcile(t *testing.T) { g := NewWithT(t) @@ -3509,6 +3587,18 @@ func addConditionsToExternal(u *unstructured.Unstructured, newConditions cluster v1beta1conditions.UnstructuredSetter(u).SetV1Beta1Conditions(existingConditions) } +// assertConditions asserts the metav1 conditions set on the Getter object.
+func assertConditions(t *testing.T, from conditions.Getter, conditionsToAssert ...metav1.Condition) { + t.Helper() + + g := NewWithT(t) + for _, condition := range conditionsToAssert { + actualCondition := conditions.Get(from, condition.Type) + g.Expect(actualCondition).ToNot(BeNil(), "condition %s is missing", condition.Type) + g.Expect(*actualCondition).To(conditions.MatchCondition(condition, conditions.IgnoreLastTransitionTime(true))) + } +} + // asserts the conditions set on the Getter object. func assertV1Beta1Conditions(t *testing.T, from v1beta1conditions.Getter, conditions ...*clusterv1.Condition) { t.Helper() From be38f19bef0d3e63a22bd5af5197a96171bb1483 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Fri, 18 Apr 2025 15:14:42 +0200 Subject: [PATCH 09/20] Stop using clusterv1.ControlPlaneInitializedV1Beta1Condition in controllers --- .../controllers/kubeadmconfig_controller.go | 3 +- .../kubeadmconfig_controller_test.go | 26 +++++------ cmd/clusterctl/client/cluster/mover.go | 5 +-- cmd/clusterctl/client/cluster/mover_test.go | 41 +++++------------ .../remote/cluster_cache_healthcheck_test.go | 4 +- controllers/remote/cluster_cache_tracker.go | 5 +-- .../remote/cluster_cache_tracker_test.go | 4 +- .../controllers/machinepool_controller.go | 4 +- .../controllers/cluster/cluster_controller.go | 13 +++--- .../cluster/cluster_controller_phases.go | 2 +- .../cluster/cluster_controller_phases_test.go | 2 + .../cluster/cluster_controller_test.go | 6 +-- .../controllers/machine/machine_controller.go | 3 +- .../machine/machine_controller_status.go | 6 +-- .../machine/machine_controller_status_test.go | 35 +++++---------- .../machine/machine_controller_test.go | 7 --- .../machinehealthcheck_controller.go | 2 +- .../machinehealthcheck_targets.go | 7 ++- .../machinehealthcheck_targets_test.go | 16 +++++-- .../topology/cluster/reconcile_state.go | 8 ++-- .../topology/cluster/reconcile_state_test.go | 44 ++++--------------- .../backends/docker/dockermachine_backend.go | 6 +-- .../inmemory/inmemorymachine_backend.go | 3 +- util/conditions/getter.go | 8 ++++ util/predicates/cluster_predicates.go | 8 ++-- util/predicates/cluster_predicates_test.go | 7 +-- 26 files changed, 108 insertions(+), 167 deletions(-) diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index e1053a25cdcd..44082fa02ea0 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -358,8 +358,7 @@ func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, c } // Note: can't use IsFalse here because we need to handle the absence of the condition as well as false. 
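The note above is why these call sites negate IsTrue rather than switch to IsFalse. A minimal sketch of the distinction, using the v1beta2 util/conditions helpers adopted by this series (illustrative, not part of the patch):

// Given a Cluster whose ClusterControlPlaneInitializedCondition has not been set yet:
//   conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition)  == false
//   conditions.IsFalse(cluster, clusterv1.ClusterControlPlaneInitializedCondition) == false
// so negating IsTrue treats "absent" the same as "not initialized", while IsFalse would not.
if !conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) {
	// Condition absent, False, or Unknown: the control plane is not initialized yet.
}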
- // TODO (v1beta2): test for v1beta2 conditions - if !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { + if !conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { return r.handleClusterNotInitialized(ctx, scope) } diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go index 807422a5b8a6..7b1e270fbe0c 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go @@ -487,7 +487,7 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "validhost", Port: 6443} cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") controlPlaneInitConfig := newControlPlaneInitKubeadmConfig(controlPlaneInitMachine.Namespace, configName) @@ -551,7 +551,7 @@ func TestKubeadmConfigReconciler_Reconcile_ErrorIfJoiningControlPlaneHasInvalidC // TODO: extract this kind of code into a setup function that puts the state of objects into an initialized controlplane (implies secrets exist) cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") controlPlaneInitConfig := newControlPlaneInitKubeadmConfig(controlPlaneInitMachine.Namespace, "control-plane-init-cfg") @@ -597,7 +597,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") controlPlaneInitConfig := newControlPlaneInitKubeadmConfig(controlPlaneInitMachine.Namespace, "control-plane-init-cfg") addKubeadmConfigToMachine(controlPlaneInitConfig, controlPlaneInitMachine) @@ -644,7 +644,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsReady(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} - 
v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} useCases := []struct { @@ -732,7 +732,7 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} useCases := []struct { @@ -845,7 +845,7 @@ func TestBootstrapDataFormat(t *testing.T) { cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} if tc.clusterInitialized { - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} } var machine *clusterv1.Machine @@ -937,7 +937,7 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -1010,7 +1010,7 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -1267,7 +1267,7 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -1460,7 +1460,7 @@ func 
TestBootstrapTokenRefreshIfTokenSecretCleaned(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -1534,7 +1534,7 @@ func TestBootstrapTokenRefreshIfTokenSecretCleaned(t *testing.T) { cluster := builder.Cluster(metav1.NamespaceDefault, "cluster").Build() cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -1928,7 +1928,7 @@ func TestKubeadmConfigReconciler_Reconcile_AlwaysCheckCAVerificationUnlessReques // Setup work for an initialized cluster clusterName := "my-cluster" cluster := builder.Cluster(metav1.NamespaceDefault, clusterName).Build() - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ Host: "example.com", @@ -2765,7 +2765,7 @@ func TestKubeadmConfigReconciler_Reconcile_v1beta2_conditions(t *testing.T) { // Setup work for an initialized cluster clusterName := "my-cluster" cluster := builder.Cluster(metav1.NamespaceDefault, clusterName).Build() - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}} cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ Host: "example.com", diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go index bd4114ad000f..73279fd02e81 100644 --- a/cmd/clusterctl/client/cluster/mover.go +++ b/cmd/clusterctl/client/cluster/mover.go @@ -40,7 +40,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/yaml" ) @@ -246,8 +246,7 @@ func (o *objectMover) checkProvisioningCompleted(ctx context.Context, graph *obj } // Note: can't use IsFalse here because we need to handle the absence of the condition as well as false. 
- // TODO (v1beta2): test for v1beta2 conditions - if !v1beta1conditions.IsTrue(clusterObj, clusterv1.ControlPlaneInitializedV1Beta1Condition) { + if !conditions.IsTrue(clusterObj, clusterv1.ClusterControlPlaneInitializedCondition) { errList = append(errList, errors.Errorf("cannot start the move operation while the control plane for %q %s/%s is not yet initialized", clusterObj.GroupVersionKind(), clusterObj.GetNamespace(), clusterObj.GetName())) continue } diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go index 3f0452aa81b8..7a360546a0db 100644 --- a/cmd/clusterctl/client/cluster/mover_test.go +++ b/cmd/clusterctl/client/cluster/mover_test.go @@ -42,7 +42,6 @@ import ( clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test/providers/infrastructure" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) type moveTestsFields struct { @@ -1483,12 +1482,8 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { }, Status: clusterv1.ClusterStatus{ Initialization: &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: false}, - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(clusterv1.ControlPlaneInitializedV1Beta1Condition), - }, - }, + Conditions: []metav1.Condition{ + {Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}, }, }, }, @@ -1532,12 +1527,8 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { }, Status: clusterv1.ClusterStatus{ Initialization: &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true}, - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.FalseCondition(clusterv1.ControlPlaneInitializedV1Beta1Condition, "", clusterv1.ConditionSeverityInfo, ""), - }, - }, + Conditions: []metav1.Condition{ + {Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionFalse}, }, }, }, @@ -1563,12 +1554,8 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { }, Status: clusterv1.ClusterStatus{ Initialization: &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true}, - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(clusterv1.ControlPlaneInitializedV1Beta1Condition), - }, - }, + Conditions: []metav1.Condition{ + {Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}, }, }, }, @@ -1592,12 +1579,8 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { }, Status: clusterv1.ClusterStatus{ Initialization: &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true}, - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(clusterv1.ControlPlaneInitializedV1Beta1Condition), - }, - }, + Conditions: []metav1.Condition{ + {Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}, }, }, }, @@ -1642,12 +1625,8 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { }, Status: clusterv1.ClusterStatus{ 
Initialization: &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true, ControlPlaneInitialized: true}, - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(clusterv1.ControlPlaneInitializedV1Beta1Condition), - }, - }, + Conditions: []metav1.Condition{ + {Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}, }, }, }, diff --git a/controllers/remote/cluster_cache_healthcheck_test.go b/controllers/remote/cluster_cache_healthcheck_test.go index c568f2f6baea..c0d3040a7fa5 100644 --- a/controllers/remote/cluster_cache_healthcheck_test.go +++ b/controllers/remote/cluster_cache_healthcheck_test.go @@ -39,7 +39,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" ) func TestClusterCacheHealthCheck(t *testing.T) { @@ -98,7 +98,7 @@ func TestClusterCacheHealthCheck(t *testing.T) { }, } g.Expect(env.CreateAndWait(ctx, testCluster)).To(Succeed()) - v1beta1conditions.MarkTrue(testCluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + conditions.Set(testCluster, metav1.Condition{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterControlPlaneInitializedReason}) testCluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} g.Expect(k8sClient.Status().Update(ctx, testCluster)).To(Succeed()) diff --git a/controllers/remote/cluster_cache_tracker.go b/controllers/remote/cluster_cache_tracker.go index 3623a81c8231..704890e49d99 100644 --- a/controllers/remote/cluster_cache_tracker.go +++ b/controllers/remote/cluster_cache_tracker.go @@ -50,7 +50,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util/certs" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" ) const ( @@ -699,8 +699,7 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health return false, nil } - // TODO (v1beta2): test for v1beta2 conditions - if cluster.Status.Initialization == nil || !cluster.Status.Initialization.InfrastructureProvisioned || !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { + if cluster.Status.Initialization == nil || !cluster.Status.Initialization.InfrastructureProvisioned || !conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { // If the infrastructure or control plane aren't marked as provisioned/initialized, we should requeue and wait. 
return false, nil } diff --git a/controllers/remote/cluster_cache_tracker_test.go b/controllers/remote/cluster_cache_tracker_test.go index 5e9f3bd15537..1d2ba833c54c 100644 --- a/controllers/remote/cluster_cache_tracker_test.go +++ b/controllers/remote/cluster_cache_tracker_test.go @@ -37,7 +37,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" ) func mapper(_ context.Context, i client.Object) []reconcile.Request { @@ -114,7 +114,7 @@ func TestClusterCacheTracker(t *testing.T) { }, } g.Expect(k8sClient.Create(ctx, clusterA)).To(Succeed()) - v1beta1conditions.MarkTrue(clusterA, clusterv1.ControlPlaneInitializedV1Beta1Condition) + conditions.Set(clusterA, metav1.Condition{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterControlPlaneInitializedReason}) clusterA.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} g.Expect(k8sClient.Status().Update(ctx, clusterA)).To(Succeed()) diff --git a/exp/internal/controllers/machinepool_controller.go b/exp/internal/controllers/machinepool_controller.go index 22079aa88fd4..41cd0cbdf487 100644 --- a/exp/internal/controllers/machinepool_controller.go +++ b/exp/internal/controllers/machinepool_controller.go @@ -47,6 +47,7 @@ import ( expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta2" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/finalizers" "sigs.k8s.io/cluster-api/util/patch" @@ -363,8 +364,7 @@ func (r *MachinePoolReconciler) reconcileDeleteExternal(ctx context.Context, mac func (r *MachinePoolReconciler) watchClusterNodes(ctx context.Context, cluster *clusterv1.Cluster) error { log := ctrl.LoggerFrom(ctx) - // TODO (v1beta2): test for v1beta2 conditions - if !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { + if !conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { log.V(5).Info("Skipping node watching setup because control plane is not initialized") return nil } diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 419265b981b3..8e86148e4817 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -252,7 +252,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retRes ct reconcileNormal := append( alwaysReconcile, r.reconcileKubeconfig, - r.reconcileControlPlaneInitialized, + r.reconcileV1Beta1ControlPlaneInitialized, ) return doReconcile(ctx, reconcileNormal, s) } @@ -704,19 +704,19 @@ func (c *clusterDescendants) filterOwnedDescendants(cluster *clusterv1.Cluster) return ownedDescendants, nil } -func (r *Reconciler) reconcileControlPlaneInitialized(ctx context.Context, s *scope) (ctrl.Result, error) { +func (r *Reconciler) reconcileV1Beta1ControlPlaneInitialized(ctx context.Context, s *scope) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) cluster := s.cluster // Skip checking if the control plane is initialized when using a Control Plane Provider (this is reconciled in // reconcileControlPlane instead). 
if cluster.Spec.ControlPlaneRef != nil { - log.V(4).Info("Skipping reconcileControlPlaneInitialized because cluster has a controlPlaneRef") + log.V(4).Info("Skipping reconcileV1Beta1ControlPlaneInitialized because cluster has a controlPlaneRef") return ctrl.Result{}, nil } - if v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { - log.V(4).Info("Skipping reconcileControlPlaneInitialized because control plane already initialized") + if conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { + log.V(4).Info("Skipping reconcileV1Beta1ControlPlaneInitialized because control plane already initialized") return ctrl.Result{}, nil } @@ -759,8 +759,7 @@ func (r *Reconciler) controlPlaneMachineToCluster(ctx context.Context, o client. return nil } - // TODO (v1beta2): test for v1beta2 conditions - if v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { + if conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { return nil } diff --git a/internal/controllers/cluster/cluster_controller_phases.go b/internal/controllers/cluster/cluster_controller_phases.go index e5475e9a19eb..3344935a5f4e 100644 --- a/internal/controllers/cluster/cluster_controller_phases.go +++ b/internal/controllers/cluster/cluster_controller_phases.go @@ -356,7 +356,7 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope) (ctrl. return ctrl.Result{}, nil } - // Update cluster.Status.ControlPlaneInitialized if it hasn't already been set. + // Update ControlPlaneInitializedV1Beta1Condition if it hasn't already been set. if !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { if initialized { v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) diff --git a/internal/controllers/cluster/cluster_controller_phases_test.go b/internal/controllers/cluster/cluster_controller_phases_test.go index b9b5e6e5795f..5b2ad8e1120b 100644 --- a/internal/controllers/cluster/cluster_controller_phases_test.go +++ b/internal/controllers/cluster/cluster_controller_phases_test.go @@ -464,6 +464,8 @@ func TestClusterReconcileControlPlane(t *testing.T) { }, expectErr: false, check: func(g *GomegaWithT, in *clusterv1.Cluster) { + g.Expect(in.Status.Initialization.ControlPlaneInitialized).To(BeTrue()) + g.Expect(v1beta1conditions.IsTrue(in, clusterv1.ControlPlaneReadyV1Beta1Condition)).To(BeTrue()) g.Expect(v1beta1conditions.IsTrue(in, clusterv1.ControlPlaneInitializedV1Beta1Condition)).To(BeTrue()) }, diff --git a/internal/controllers/cluster/cluster_controller_test.go b/internal/controllers/cluster/cluster_controller_test.go index 77753ada0099..0a8f94356879 100644 --- a/internal/controllers/cluster/cluster_controller_test.go +++ b/internal/controllers/cluster/cluster_controller_test.go @@ -424,7 +424,7 @@ func TestClusterReconciler(t *testing.T) { if err := env.Get(ctx, key, cluster); err != nil { return false } - return v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + return conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) }, timeout).Should(BeTrue()) }) } @@ -904,7 +904,7 @@ func TestObjectsPendingDelete(t *testing.T) { }) } -func TestReconcileControlPlaneInitializedControlPlaneRef(t *testing.T) { +func TestReconcileV1Beta1ControlPlaneInitializedControlPlaneRef(t *testing.T) { g := NewWithT(t) c := &clusterv1.Cluster{ @@ -925,7 +925,7 @@ func 
TestReconcileControlPlaneInitializedControlPlaneRef(t *testing.T) { s := &scope{ cluster: c, } - res, err := r.reconcileControlPlaneInitialized(ctx, s) + res, err := r.reconcileV1Beta1ControlPlaneInitialized(ctx, s) g.Expect(res.IsZero()).To(BeTrue()) g.Expect(err).ToNot(HaveOccurred()) g.Expect(v1beta1conditions.Has(c, clusterv1.ControlPlaneInitializedV1Beta1Condition)).To(BeFalse()) diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index f0185bbd613a..3a21d5f1ce75 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -58,6 +58,7 @@ import ( "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/cache" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" @@ -1052,7 +1053,7 @@ func (r *Reconciler) shouldAdopt(m *clusterv1.Machine) bool { func (r *Reconciler) watchClusterNodes(ctx context.Context, cluster *clusterv1.Cluster) error { log := ctrl.LoggerFrom(ctx) - if !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { + if !conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { log.V(5).Info("Skipping node watching setup because control plane is not initialized") return nil } diff --git a/internal/controllers/machine/machine_controller_status.go b/internal/controllers/machine/machine_controller_status.go index 44b6a8bd225e..a651c5ff7f52 100644 --- a/internal/controllers/machine/machine_controller_status.go +++ b/internal/controllers/machine/machine_controller_status.go @@ -35,7 +35,6 @@ import ( "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/util/conditions" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) // updateStatus update Machine's status. 
@@ -265,9 +264,8 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl return } - // TODO (v1beta2): test for v1beta2 conditions - controlPlaneInitialized := v1beta1conditions.Get(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) - if controlPlaneInitialized == nil || controlPlaneInitialized.Status != corev1.ConditionTrue { + controlPlaneInitialized := conditions.Get(cluster, clusterv1.ClusterControlPlaneInitializedCondition) + if controlPlaneInitialized == nil || controlPlaneInitialized.Status != metav1.ConditionTrue { setNodeConditions(machine, metav1.ConditionUnknown, clusterv1.MachineNodeInspectionFailedReason, "Waiting for Cluster control plane to be initialized") diff --git a/internal/controllers/machine/machine_controller_status_test.go b/internal/controllers/machine/machine_controller_status_test.go index 91ae469dfb82..227a29a9f5b4 100644 --- a/internal/controllers/machine/machine_controller_status_test.go +++ b/internal/controllers/machine/machine_controller_status_test.go @@ -657,13 +657,8 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { }, Status: clusterv1.ClusterStatus{ Initialization: &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true}, - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - {Type: clusterv1.ControlPlaneInitializedV1Beta1Condition, Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: now.Add(-5 * time.Second)}}, - }, - }, + Conditions: []metav1.Condition{ + {Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: now.Add(-5 * time.Second)}}, }, }, } @@ -704,7 +699,7 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { name: "Cluster control plane is not initialized", cluster: func() *clusterv1.Cluster { c := defaultCluster.DeepCopy() - v1beta1conditions.MarkFalse(c, clusterv1.ControlPlaneInitializedV1Beta1Condition, "", clusterv1.ConditionSeverityError, "") + conditions.Set(c, metav1.Condition{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionFalse}) return c }(), machine: defaultMachine.DeepCopy(), @@ -940,11 +935,9 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { name: "connection down, preserve conditions as they have been set before (remote conditions grace period not passed yet)", cluster: func() *clusterv1.Cluster { c := defaultCluster.DeepCopy() - if c.Status.Deprecated != nil && c.Status.Deprecated.V1Beta1 != nil { - for i, condition := range c.Status.Deprecated.V1Beta1.Conditions { - if condition.Type == clusterv1.ControlPlaneInitializedV1Beta1Condition { - c.Status.Deprecated.V1Beta1.Conditions[i].LastTransitionTime.Time = now.Add(-4 * time.Minute) - } + for i, condition := range c.Status.Conditions { + if condition.Type == clusterv1.ClusterControlPlaneInitializedCondition { + c.Status.Conditions[i].LastTransitionTime.Time = now.Add(-4 * time.Minute) } } return c @@ -988,11 +981,9 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { name: "connection down, set conditions as they haven't been set before (remote conditions grace period not passed yet)", cluster: func() *clusterv1.Cluster { c := defaultCluster.DeepCopy() - if c.Status.Deprecated != nil && c.Status.Deprecated.V1Beta1 != nil { - for i, condition := range c.Status.Deprecated.V1Beta1.Conditions { - if condition.Type == clusterv1.ControlPlaneInitializedV1Beta1Condition { - 
c.Status.Deprecated.V1Beta1.Conditions[i].LastTransitionTime.Time = now.Add(-4 * time.Minute) - } + for i, condition := range c.Status.Conditions { + if condition.Type == clusterv1.ClusterControlPlaneInitializedCondition { + c.Status.Conditions[i].LastTransitionTime.Time = now.Add(-4 * time.Minute) } } return c @@ -1023,11 +1014,9 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { name: "connection down, set conditions to unknown (remote conditions grace period passed)", cluster: func() *clusterv1.Cluster { c := defaultCluster.DeepCopy() - if c.Status.Deprecated != nil && c.Status.Deprecated.V1Beta1 != nil { - for i, condition := range c.Status.Deprecated.V1Beta1.Conditions { - if condition.Type == clusterv1.ControlPlaneInitializedV1Beta1Condition { - c.Status.Deprecated.V1Beta1.Conditions[i].LastTransitionTime.Time = now.Add(-7 * time.Minute) - } + for i, condition := range c.Status.Conditions { + if condition.Type == clusterv1.ClusterControlPlaneInitializedCondition { + c.Status.Conditions[i].LastTransitionTime.Time = now.Add(-7 * time.Minute) } } return c diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index b8d378b89778..23de918b4619 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -1047,13 +1047,6 @@ func TestMachineV1Beta1Conditions(t *testing.T) { Status: metav1.ConditionTrue, }, }, - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(clusterv1.ControlPlaneInitializedV1Beta1Condition), - }, - }, - }, }, } diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go index 97dba1b73b0e..c4750a244bf0 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go @@ -203,7 +203,7 @@ func (r *Reconciler) reconcile(ctx context.Context, logger logr.Logger, cluster // If the cluster is already initialized, get the remote cluster cache to use as a client.Reader. var remoteClient client.Client - if v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { + if conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { var err error remoteClient, err = r.ClusterCache.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go index 75b3521ee549..bed05f8d0a11 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go @@ -127,8 +127,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi // Don't penalize any Machine/Node if the control plane has not been initialized // Exception of this rule are control plane machine itself, so the first control plane machine can be remediated. 
- // TODO (v1beta2): test for v1beta2 conditions - if !v1beta1conditions.IsTrue(t.Cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) && !util.IsControlPlaneMachine(t.Machine) { + if !conditions.IsTrue(t.Cluster, clusterv1.ClusterControlPlaneInitializedCondition) && !util.IsControlPlaneMachine(t.Machine) { logger.V(5).Info("Not evaluating target health because the control plane has not yet been initialized") // Return a nextCheck time of 0 because we'll get requeued when the Cluster is updated. return false, 0 @@ -149,7 +148,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi return false, 0 } - controlPlaneInitialized := v1beta1conditions.GetLastTransitionTime(t.Cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + controlPlaneInitialized := conditions.GetLastTransitionTime(t.Cluster, clusterv1.ClusterControlPlaneInitializedCondition) clusterInfraReady := v1beta1conditions.GetLastTransitionTime(t.Cluster, clusterv1.InfrastructureReadyV1Beta1Condition) machineInfraReady := v1beta1conditions.GetLastTransitionTime(t.Machine, clusterv1.InfrastructureReadyV1Beta1Condition) machineCreationTime := t.Machine.CreationTimestamp.Time @@ -162,7 +161,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi "controlPlaneInitializedTime", controlPlaneInitialized, "machineInfraReadyTime", machineInfraReady, ) - if v1beta1conditions.IsTrue(t.Cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) && controlPlaneInitialized != nil && controlPlaneInitialized.Time.After(comparisonTime) { + if conditions.IsTrue(t.Cluster, clusterv1.ClusterControlPlaneInitializedCondition) && controlPlaneInitialized != nil && controlPlaneInitialized.Time.After(comparisonTime) { comparisonTime = controlPlaneInitialized.Time } if v1beta1conditions.IsTrue(t.Cluster, clusterv1.InfrastructureReadyV1Beta1Condition) && clusterInfraReady != nil && clusterInfraReady.Time.After(comparisonTime) { diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go index dd7cf33b7376..d7cce3608e9c 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go @@ -31,6 +31,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/errors" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) @@ -196,16 +197,23 @@ func TestHealthCheckTargets(t *testing.T) { }, } v1beta1conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyV1Beta1Condition) - v1beta1conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) + conditions.Set(cluster, metav1.Condition{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}) // Ensure the control plane was initialized earlier to prevent it interfering with // NodeStartupTimeout testing. 
- conds := clusterv1.Conditions{} - for _, condition := range cluster.GetV1Beta1Conditions() { + conds := []metav1.Condition{} + for _, condition := range cluster.GetConditions() { condition.LastTransitionTime = metav1.NewTime(condition.LastTransitionTime.Add(-1 * time.Hour)) conds = append(conds, condition) } - cluster.SetV1Beta1Conditions(conds) + cluster.SetConditions(conds) + + v1beta1Conditions := clusterv1.Conditions{} + for _, condition := range cluster.GetV1Beta1Conditions() { + condition.LastTransitionTime = metav1.NewTime(condition.LastTransitionTime.Add(-1 * time.Hour)) + v1beta1Conditions = append(v1beta1Conditions, condition) + } + cluster.SetV1Beta1Conditions(v1beta1Conditions) mhcSelector := map[string]string{"cluster": clusterName, "machine-group": "foo"} diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go index fda8a7ca6155..44e59efd6e83 100644 --- a/internal/controllers/topology/cluster/reconcile_state.go +++ b/internal/controllers/topology/cluster/reconcile_state.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" @@ -220,10 +221,9 @@ func (r *Reconciler) callAfterControlPlaneInitialized(ctx context.Context, s *sc } func isControlPlaneInitialized(cluster *clusterv1.Cluster) bool { - // TODO (v1beta2) switch to v1beta2 conditions - for _, condition := range cluster.GetV1Beta1Conditions() { - if condition.Type == clusterv1.ControlPlaneInitializedV1Beta1Condition { - if condition.Status == corev1.ConditionTrue { + for _, condition := range cluster.GetConditions() { + if condition.Type == clusterv1.ClusterControlPlaneInitializedCondition { + if condition.Status == metav1.ConditionTrue { return true } } diff --git a/internal/controllers/topology/cluster/reconcile_state_test.go b/internal/controllers/topology/cluster/reconcile_state_test.go index 4e8e558d5ec2..551b42c06590 100644 --- a/internal/controllers/topology/cluster/reconcile_state_test.go +++ b/internal/controllers/topology/cluster/reconcile_state_test.go @@ -348,15 +348,8 @@ func TestReconcile_callAfterControlPlaneInitialized(t *testing.T) { InfrastructureRef: &corev1.ObjectReference{}, }, Status: clusterv1.ClusterStatus{ - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - clusterv1.Condition{ - Type: clusterv1.ControlPlaneInitializedV1Beta1Condition, - Status: corev1.ConditionTrue, - }, - }, - }, + Conditions: []metav1.Condition{ + {Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}, }, }, }, @@ -380,15 +373,8 @@ func TestReconcile_callAfterControlPlaneInitialized(t *testing.T) { InfrastructureRef: &corev1.ObjectReference{}, }, Status: clusterv1.ClusterStatus{ - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - clusterv1.Condition{ - Type: clusterv1.ControlPlaneInitializedV1Beta1Condition, - Status: corev1.ConditionTrue, - }, - }, - }, + Conditions: []metav1.Condition{ + {Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}, }, }, }, @@ -412,15 +398,8 @@ func TestReconcile_callAfterControlPlaneInitialized(t *testing.T) { InfrastructureRef: 
&corev1.ObjectReference{}, }, Status: clusterv1.ClusterStatus{ - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - clusterv1.Condition{ - Type: clusterv1.ControlPlaneInitializedV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, - }, + Conditions: []metav1.Condition{ + {Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionFalse}, }, }, }, @@ -441,15 +420,8 @@ func TestReconcile_callAfterControlPlaneInitialized(t *testing.T) { InfrastructureRef: &corev1.ObjectReference{}, }, Status: clusterv1.ClusterStatus{ - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - clusterv1.Condition{ - Type: clusterv1.ControlPlaneInitializedV1Beta1Condition, - Status: corev1.ConditionTrue, - }, - }, - }, + Conditions: []metav1.Condition{ + {Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}, }, }, }, diff --git a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go index 4f2176a993f7..ac0eea897d03 100644 --- a/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/docker/dockermachine_backend.go @@ -165,8 +165,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // Make sure bootstrap data is available and populated. if dataSecretName == nil { - // TODO (v1beta2): test for v1beta2 conditions - if !util.IsControlPlaneMachine(machine) && !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { + if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { log.Info("Waiting for the control plane to be initialized") v1beta1conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableV1Beta1Reason, clusterv1.ConditionSeverityInfo, "") conditions.Set(dockerMachine, metav1.Condition{ @@ -340,9 +339,8 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // NOTE: If the Cluster doesn't use a control plane, the ControlPlaneInitialized condition is only // set to true after a control plane machine has a node ref. If we would requeue here in this case, the // Machine will never get a node ref as ProviderID is required to set the node ref, so we would get a deadlock. 
- // TODO (v1beta2): test for v1beta2 conditions if cluster.Spec.ControlPlaneRef != nil && - !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { + !conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } diff --git a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go index 171feb310bdd..dcc2df75f51e 100644 --- a/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go +++ b/test/infrastructure/docker/internal/controllers/backends/inmemory/inmemorymachine_backend.go @@ -109,8 +109,7 @@ func (r *MachineBackendReconciler) ReconcileNormal(ctx context.Context, cluster // NOTE: we are not using bootstrap data, but we wait for it in order to simulate a real machine // provisioning workflow. if machine.Spec.Bootstrap.DataSecretName == nil { - // TODO (v1beta2): test for v1beta2 conditions - if !util.IsControlPlaneMachine(machine) && !v1beta1conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { + if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { v1beta1conditions.MarkFalse(inMemoryMachine, infrav1.VMProvisionedCondition, infrav1.WaitingControlPlaneInitializedReason, clusterv1.ConditionSeverityInfo, "") conditions.Set(inMemoryMachine, metav1.Condition{ Type: infrav1.DevMachineInMemoryVMProvisionedV1Beta2Condition, diff --git a/util/conditions/getter.go b/util/conditions/getter.go index c278ff59bbb6..e79774c20d0f 100644 --- a/util/conditions/getter.go +++ b/util/conditions/getter.go @@ -102,6 +102,14 @@ func GetMessage(from Getter, conditionType string) string { return "" } +// GetLastTransitionTime returns a nil safe metav1.Time of LastTransitionTime. +func GetLastTransitionTime(from Getter, conditionType string) *metav1.Time { + if c := Get(from, conditionType); c != nil { + return &c.LastTransitionTime + } + return nil +} + // UnstructuredGetAll returns conditions from an Unstructured object. // // UnstructuredGetAll supports retrieving conditions from objects at different stages of the transition from diff --git a/util/predicates/cluster_predicates.go b/util/predicates/cluster_predicates.go index 4a427f8a6680..5c27f20f01c1 100644 --- a/util/predicates/cluster_predicates.go +++ b/util/predicates/cluster_predicates.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" ) // ClusterCreateInfraProvisioned returns a predicate that returns true for a create event when the Cluster infrastructure is provisioned. 
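The GetLastTransitionTime helper added to util/conditions above is the metav1 counterpart of the v1beta1 helper it replaces in the MachineHealthCheck target code. A hedged usage sketch, for illustration only and not part of the patch; the wrapper function is hypothetical, while the helper calls and condition type come from the hunks above.

package example // illustrative only, not part of this patch

import (
	"time"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// comparisonTimeFor is a hypothetical helper: it returns the later of the machine
// creation time and the time the control plane became initialized, using the nil-safe
// GetLastTransitionTime introduced in the hunk above.
func comparisonTimeFor(cluster *clusterv1.Cluster, machineCreation time.Time) time.Time {
	comparison := machineCreation
	if t := conditions.GetLastTransitionTime(cluster, clusterv1.ClusterControlPlaneInitializedCondition); t != nil &&
		conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) &&
		t.Time.After(comparison) {
		comparison = t.Time
	}
	return comparison
}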
@@ -243,10 +243,8 @@ func ClusterControlPlaneInitialized(scheme *runtime.Scheme, logger logr.Logger) } newCluster := e.ObjectNew.(*clusterv1.Cluster) - - // TODO (v1beta2): test for v1beta2 conditions - if !v1beta1conditions.IsTrue(oldCluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) && - v1beta1conditions.IsTrue(newCluster, clusterv1.ControlPlaneInitializedV1Beta1Condition) { + if !conditions.IsTrue(oldCluster, clusterv1.ClusterControlPlaneInitializedCondition) && + conditions.IsTrue(newCluster, clusterv1.ClusterControlPlaneInitializedCondition) { log.V(6).Info("Cluster ControlPlaneInitialized was set, allow further processing") return true } diff --git a/util/predicates/cluster_predicates_test.go b/util/predicates/cluster_predicates_test.go index f137d37c3b93..b3d87c76c59a 100644 --- a/util/predicates/cluster_predicates_test.go +++ b/util/predicates/cluster_predicates_test.go @@ -21,12 +21,13 @@ import ( "github.com/go-logr/logr" . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/log" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -35,10 +36,10 @@ func TestClusterControlplaneInitializedPredicate(t *testing.T) { predicate := predicates.ClusterControlPlaneInitialized(runtime.NewScheme(), logr.New(log.NullLogSink{})) markedFalse := clusterv1.Cluster{} - v1beta1conditions.MarkFalse(&markedFalse, clusterv1.ControlPlaneInitializedV1Beta1Condition, clusterv1.MissingNodeRefV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") + conditions.Set(&markedFalse, metav1.Condition{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionFalse}) markedTrue := clusterv1.Cluster{} - v1beta1conditions.MarkTrue(&markedTrue, clusterv1.ControlPlaneInitializedV1Beta1Condition) + conditions.Set(&markedTrue, metav1.Condition{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}) notMarked := clusterv1.Cluster{} From fa65d1cffa5b38125a5d444417a3393f50e4e96c Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Fri, 18 Apr 2025 18:00:33 +0200 Subject: [PATCH 10/20] Stop using clusterv1.InfrastructureReadyV1Beta1Condition in controllers --- .../cluster/cluster_controller_test.go | 8 +++++-- .../machine/machine_controller_test.go | 9 ++++--- .../machinehealthcheck_controller_test.go | 24 ++++++++++++++++--- .../machinehealthcheck_targets.go | 10 ++++---- .../machinehealthcheck_targets_test.go | 18 ++------------ .../machinehealthcheck/suite_test.go | 9 ------- 6 files changed, 40 insertions(+), 38 deletions(-) diff --git a/internal/controllers/cluster/cluster_controller_test.go b/internal/controllers/cluster/cluster_controller_test.go index 0a8f94356879..3cb412a7b01f 100644 --- a/internal/controllers/cluster/cluster_controller_test.go +++ b/internal/controllers/cluster/cluster_controller_test.go @@ -229,7 +229,11 @@ func TestClusterReconciler(t *testing.T) { g.Eventually(func() bool { ph, err := patch.NewHelper(cluster, env) g.Expect(err).ToNot(HaveOccurred()) - v1beta1conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyV1Beta1Condition) + conditions.Set(cluster, metav1.Condition{ + Type: clusterv1.ClusterInfrastructureReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterInfrastructureReadyReason, + }) 
g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed()) return true }, timeout).Should(BeTrue()) @@ -240,7 +244,7 @@ func TestClusterReconciler(t *testing.T) { if err := env.Get(ctx, key, instance); err != nil { return false } - return v1beta1conditions.IsTrue(cluster, clusterv1.InfrastructureReadyV1Beta1Condition) + return conditions.IsTrue(cluster, clusterv1.ClusterInfrastructureReadyCondition) }, timeout).Should(BeTrue()) }) diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index 23de918b4619..4e4bc3c75a23 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -494,11 +494,11 @@ func TestMachine_Reconcile(t *testing.T) { if err := env.Get(ctx, key, machine); err != nil { return false } - if !v1beta1conditions.Has(machine, clusterv1.InfrastructureReadyV1Beta1Condition) { + if !conditions.Has(machine, clusterv1.MachineInfrastructureReadyCondition) { return false } - readyCondition := v1beta1conditions.Get(machine, clusterv1.ReadyV1Beta1Condition) - return readyCondition.Status == corev1.ConditionTrue + readyCondition := conditions.Get(machine, clusterv1.MachineInfrastructureReadyCondition) + return readyCondition.Status == metav1.ConditionTrue }, timeout).Should(BeTrue()) g.Expect(env.Delete(ctx, machine)).ToNot(HaveOccurred()) @@ -1256,6 +1256,9 @@ func TestMachineV1Beta1Conditions(t *testing.T) { wantErr: true, conditionsToAssert: []metav1.Condition{ {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineReadyUnknownReason, Message: "* NodeHealthy: Please check controller logs for errors"}, + {Type: clusterv1.MachineBootstrapConfigReadyCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineBootstrapConfigReadyReason, Message: ""}, + {Type: clusterv1.MachineInfrastructureReadyCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineInfrastructureReadyReason, Message: ""}, + {Type: clusterv1.MachineNodeHealthyCondition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineNodeInternalErrorReason, Message: "Please check controller logs for errors"}, }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.TrueCondition(clusterv1.InfrastructureReadyV1Beta1Condition), diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index 3e6d914c132e..68ee40df7c04 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -262,7 +262,11 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { patchHelper, err := patch.NewHelper(cluster, env.Client) g.Expect(err).ToNot(HaveOccurred()) - v1beta1conditions.MarkFalse(cluster, clusterv1.InfrastructureReadyV1Beta1Condition, "SomeReason", clusterv1.ConditionSeverityError, "") + conditions.Set(cluster, metav1.Condition{ + Type: clusterv1.ClusterInfrastructureReadyCondition, + Status: metav1.ConditionFalse, + Reason: clusterv1.ClusterControlPlaneNotInitializedReason, + }) g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed()) mhc := newMachineHealthCheck(cluster.Namespace, cluster.Name) @@ -2451,13 +2455,27 @@ func createCluster(g *WithT, namespaceName string) *clusterv1.Cluster { // This is required for MHC to perform checks patchHelper, err := 
patch.NewHelper(cluster, env.Client) g.Expect(err).ToNot(HaveOccurred()) - v1beta1conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyV1Beta1Condition) + + cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: true, + } + conditions.Set(cluster, metav1.Condition{ + Type: clusterv1.ClusterInfrastructureReadyCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterInfrastructureReadyReason, + }) + + conditions.Set(cluster, metav1.Condition{ + Type: clusterv1.ClusterControlPlaneInitializedCondition, + Status: metav1.ConditionTrue, + Reason: clusterv1.ClusterControlPlaneInitializedReason, + }) g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed()) // Wait for cluster in the cached client to be updated post-patch g.Eventually(func(g Gomega) { g.Expect(env.Get(ctx, util.ObjectKey(cluster), cluster)).To(Succeed()) - g.Expect(v1beta1conditions.IsTrue(cluster, clusterv1.InfrastructureReadyV1Beta1Condition)).To(BeTrue()) + g.Expect(conditions.IsTrue(cluster, clusterv1.ClusterInfrastructureReadyCondition)).To(BeTrue()) }, timeout, 100*time.Millisecond).Should(Succeed()) return cluster diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go index bed05f8d0a11..3dfad3afc610 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go @@ -134,7 +134,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi } // Don't penalize any Machine/Node if the cluster infrastructure is not ready. - if !v1beta1conditions.IsTrue(t.Cluster, clusterv1.InfrastructureReadyV1Beta1Condition) { + if !conditions.IsTrue(t.Cluster, clusterv1.ClusterInfrastructureReadyCondition) { logger.V(5).Info("Not evaluating target health because the cluster infrastructure is not ready") // Return a nextCheck time of 0 because we'll get requeued when the Cluster is updated. return false, 0 @@ -149,8 +149,8 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi } controlPlaneInitialized := conditions.GetLastTransitionTime(t.Cluster, clusterv1.ClusterControlPlaneInitializedCondition) - clusterInfraReady := v1beta1conditions.GetLastTransitionTime(t.Cluster, clusterv1.InfrastructureReadyV1Beta1Condition) - machineInfraReady := v1beta1conditions.GetLastTransitionTime(t.Machine, clusterv1.InfrastructureReadyV1Beta1Condition) + clusterInfraReady := conditions.GetLastTransitionTime(t.Cluster, clusterv1.ClusterInfrastructureReadyCondition) + machineInfraReady := conditions.GetLastTransitionTime(t.Machine, clusterv1.MachineInfrastructureReadyCondition) machineCreationTime := t.Machine.CreationTimestamp.Time // Use the latest of the following timestamps. 
@@ -164,10 +164,10 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi if conditions.IsTrue(t.Cluster, clusterv1.ClusterControlPlaneInitializedCondition) && controlPlaneInitialized != nil && controlPlaneInitialized.Time.After(comparisonTime) { comparisonTime = controlPlaneInitialized.Time } - if v1beta1conditions.IsTrue(t.Cluster, clusterv1.InfrastructureReadyV1Beta1Condition) && clusterInfraReady != nil && clusterInfraReady.Time.After(comparisonTime) { + if conditions.IsTrue(t.Cluster, clusterv1.ClusterInfrastructureReadyCondition) && clusterInfraReady != nil && clusterInfraReady.Time.After(comparisonTime) { comparisonTime = clusterInfraReady.Time } - if v1beta1conditions.IsTrue(t.Machine, clusterv1.InfrastructureReadyV1Beta1Condition) && machineInfraReady != nil && machineInfraReady.Time.After(comparisonTime) { + if conditions.IsTrue(t.Machine, clusterv1.MachineInfrastructureReadyCondition) && machineInfraReady != nil && machineInfraReady.Time.After(comparisonTime) { comparisonTime = machineInfraReady.Time } logger.V(5).Info("Using comparison time", "time", comparisonTime) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go index d7cce3608e9c..607a6596970c 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go @@ -196,7 +196,7 @@ func TestHealthCheckTargets(t *testing.T) { Name: clusterName, }, } - v1beta1conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyV1Beta1Condition) + conditions.Set(cluster, metav1.Condition{Type: clusterv1.ClusterInfrastructureReadyCondition, Status: metav1.ConditionTrue}) conditions.Set(cluster, metav1.Condition{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue}) // Ensure the control plane was initialized earlier to prevent it interfering with @@ -208,13 +208,6 @@ func TestHealthCheckTargets(t *testing.T) { } cluster.SetConditions(conds) - v1beta1Conditions := clusterv1.Conditions{} - for _, condition := range cluster.GetV1Beta1Conditions() { - condition.LastTransitionTime = metav1.NewTime(condition.LastTransitionTime.Add(-1 * time.Hour)) - v1beta1Conditions = append(v1beta1Conditions, condition) - } - cluster.SetV1Beta1Conditions(v1beta1Conditions) - mhcSelector := map[string]string{"cluster": clusterName, "machine-group": "foo"} timeoutForMachineToHaveNode := 10 * time.Minute @@ -250,14 +243,7 @@ func TestHealthCheckTargets(t *testing.T) { testMachine := newTestMachine("machine1", namespace, clusterName, "node1", mhcSelector) testMachineWithInfraReady := testMachine.DeepCopy() testMachineWithInfraReady.CreationTimestamp = metav1.NewTime(time.Now().Add(-100 * time.Second)) - testMachineWithInfraReady.SetV1Beta1Conditions(clusterv1.Conditions{ - { - Type: clusterv1.InfrastructureReadyV1Beta1Condition, - Status: corev1.ConditionTrue, - Severity: clusterv1.ConditionSeverityInfo, - LastTransitionTime: metav1.NewTime(testMachineWithInfraReady.CreationTimestamp.Add(50 * time.Second)), - }, - }) + conditions.Set(testMachineWithInfraReady, metav1.Condition{Type: clusterv1.MachineInfrastructureReadyCondition, Status: metav1.ConditionTrue, LastTransitionTime: metav1.NewTime(testMachineWithInfraReady.CreationTimestamp.Add(50 * time.Second))}) nodeNotYetStartedTargetAndInfraReady := healthCheckTarget{ Cluster: cluster, diff --git 
a/internal/controllers/machinehealthcheck/suite_test.go b/internal/controllers/machinehealthcheck/suite_test.go index 0feb1e85070d..5c4abf96e4cc 100644 --- a/internal/controllers/machinehealthcheck/suite_test.go +++ b/internal/controllers/machinehealthcheck/suite_test.go @@ -36,7 +36,6 @@ import ( "sigs.k8s.io/cluster-api/api/v1beta2/index" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/controllers/remote" - clustercontroller "sigs.k8s.io/cluster-api/internal/controllers/cluster" machinecontroller "sigs.k8s.io/cluster-api/internal/controllers/machine" machinesetcontroller "sigs.k8s.io/cluster-api/internal/controllers/machineset" "sigs.k8s.io/cluster-api/internal/test/envtest" @@ -89,14 +88,6 @@ func TestMain(m *testing.M) { clusterCache.(interface{ SetConnectionCreationRetryInterval(time.Duration) }). SetConnectionCreationRetryInterval(2 * time.Second) - if err := (&clustercontroller.Reconciler{ - Client: mgr.GetClient(), - APIReader: mgr.GetClient(), - ClusterCache: clusterCache, - RemoteConnectionGracePeriod: 50 * time.Second, - }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil { - panic(fmt.Sprintf("Failed to start ClusterReconciler: %v", err)) - } if err := (&Reconciler{ Client: mgr.GetClient(), ClusterCache: clusterCache, From dae842fa6698cebc1434b9789f6b5739cc0d3852 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Tue, 22 Apr 2025 12:47:24 +0200 Subject: [PATCH 11/20] Stop using clusterv1.ReadyV1Beta1Condition in controllers --- cmd/clusterctl/client/tree/tree.go | 54 ++--- cmd/clusterctl/client/tree/tree_test.go | 8 +- cmd/clusterctl/client/tree/util.go | 30 +-- .../machine/machine_controller_test.go | 43 ++-- internal/util/tree/tree.go | 20 +- util/collections/machine_filters.go | 4 +- util/patch/patch_test.go | 199 +++++++++--------- 7 files changed, 178 insertions(+), 180 deletions(-) diff --git a/cmd/clusterctl/client/tree/tree.go b/cmd/clusterctl/client/tree/tree.go index 6d080a0556eb..99e051ebc737 100644 --- a/cmd/clusterctl/client/tree/tree.go +++ b/cmd/clusterctl/client/tree/tree.go @@ -97,17 +97,17 @@ func (od ObjectTree) Add(parent, obj client.Object, opts ...AddObjectOption) (ad // Get a small set of conditions that will be used to determine e.g. when grouping or when an object is just an echo of // its parent. 
- var objReady, parentReady *clusterv1.Condition - var objAvailableV1Beta2, objReadyV1Beta2, objUpToDateV1Beta2, parentReadyV1Beta2 *metav1.Condition + var objReadyV1Beta1, parentReadyV1Beta1 *clusterv1.Condition + var objAvailable, objReady, objUpToDate, parentReady *metav1.Condition switch od.options.V1Beta2 { case true: - objAvailableV1Beta2 = GetAvailableV1Beta2Condition(obj) - objReadyV1Beta2 = GetReadyV1Beta2Condition(obj) - objUpToDateV1Beta2 = GetMachineUpToDateV1Beta2Condition(obj) - parentReadyV1Beta2 = GetReadyV1Beta2Condition(parent) - default: + objAvailable = GetAvailableCondition(obj) objReady = GetReadyCondition(obj) + objUpToDate = GetMachineUpToDateCondition(obj) parentReady = GetReadyCondition(parent) + default: + objReadyV1Beta1 = GetV1Beta1ReadyCondition(obj) + parentReadyV1Beta1 = GetV1Beta1ReadyCondition(parent) } // If it is requested to show all the conditions for the object, add @@ -121,11 +121,11 @@ func (od ObjectTree) Add(parent, obj client.Object, opts ...AddObjectOption) (ad if addOpts.NoEcho && !od.options.Echo { switch od.options.V1Beta2 { case true: - if (objReadyV1Beta2 != nil && objReadyV1Beta2.Status == metav1.ConditionTrue) || hasSameAvailableReadyUptoDateStatusAndReason(nil, nil, parentReadyV1Beta2, objReadyV1Beta2, nil, nil) { + if (objReady != nil && objReady.Status == metav1.ConditionTrue) || hasSameAvailableReadyUptoDateStatusAndReason(nil, nil, parentReady, objReady, nil, nil) { return false, false } default: - if (objReady != nil && objReady.Status == corev1.ConditionTrue) || hasSameReadyStatusSeverityAndReason(parentReady, objReady) { + if (objReadyV1Beta1 != nil && objReadyV1Beta1.Status == corev1.ConditionTrue) || hasSameReadyStatusSeverityAndReason(parentReadyV1Beta1, objReadyV1Beta1) { return false, false } } @@ -154,24 +154,24 @@ func (od ObjectTree) Add(parent, obj client.Object, opts ...AddObjectOption) (ad for i := range siblings { s := siblings[i] - var sReady *clusterv1.Condition - var sAvailableV1Beta2, sReadyV1Beta2, sUpToDateV1Beta2 *metav1.Condition + var sReadyV1Beta1 *clusterv1.Condition + var sAvailable, sReady, sUpToDate *metav1.Condition switch od.options.V1Beta2 { case true: // If the object's ready condition has a different Available/ReadyUpToDate condition than the sibling object, // move on (they should not be grouped). - sAvailableV1Beta2 = GetAvailableV1Beta2Condition(s) - sReadyV1Beta2 = GetReadyV1Beta2Condition(s) - sUpToDateV1Beta2 = GetMachineUpToDateV1Beta2Condition(s) - if !hasSameAvailableReadyUptoDateStatusAndReason(objAvailableV1Beta2, sAvailableV1Beta2, objReadyV1Beta2, sReadyV1Beta2, objUpToDateV1Beta2, sUpToDateV1Beta2) { + sAvailable = GetAvailableCondition(s) + sReady = GetReadyCondition(s) + sUpToDate = GetMachineUpToDateCondition(s) + if !hasSameAvailableReadyUptoDateStatusAndReason(objAvailable, sAvailable, objReady, sReady, objUpToDate, sUpToDate) { continue } default: - sReady = GetReadyCondition(s) + sReadyV1Beta1 = GetV1Beta1ReadyCondition(s) // If the object's ready condition has a different Status, Severity and Reason than the sibling object, // move on (they should not be grouped). 
- if !hasSameReadyStatusSeverityAndReason(objReady, sReady) { + if !hasSameReadyStatusSeverityAndReason(objReadyV1Beta1, sReadyV1Beta1) { continue } } @@ -183,9 +183,9 @@ func (od ObjectTree) Add(parent, obj client.Object, opts ...AddObjectOption) (ad if s.GetObjectKind().GroupVersionKind().Kind == obj.GetObjectKind().GroupVersionKind().Kind+"Group" { switch od.options.V1Beta2 { case true: - updateGroupNode(s, sReadyV1Beta2, obj, objAvailableV1Beta2, objReadyV1Beta2, objUpToDateV1Beta2) + updateGroupNode(s, sReady, obj, objAvailable, objReady, objUpToDate) default: - updateV1Beta1GroupNode(s, sReady, obj, objReady) + updateV1Beta1GroupNode(s, sReadyV1Beta1, obj, objReadyV1Beta1) } return true, false @@ -201,9 +201,9 @@ func (od ObjectTree) Add(parent, obj client.Object, opts ...AddObjectOption) (ad var groupNode *NodeObject switch od.options.V1Beta2 { case true: - groupNode = createGroupNode(s, sReadyV1Beta2, obj, objAvailableV1Beta2, objReadyV1Beta2, objUpToDateV1Beta2) + groupNode = createGroupNode(s, sReady, obj, objAvailable, objReady, objUpToDate) default: - groupNode = createV1Beta1GroupNode(s, sReady, obj, objReady) + groupNode = createV1Beta1GroupNode(s, sReadyV1Beta1, obj, objReadyV1Beta1) } // By default, grouping objects should be sorted last. @@ -332,7 +332,7 @@ func createGroupNode(sibling client.Object, siblingReady *metav1.Condition, obj if objAvailable != nil { objAvailable.LastTransitionTime = metav1.Time{} objAvailable.Message = "" - setAvailableV1Beta2Condition(groupNode, objAvailable) + setAvailableCondition(groupNode, objAvailable) if objAvailable.Status == metav1.ConditionTrue { // When creating a group, it is already the sum of obj and its own sibling, // and they all have same conditions. @@ -345,7 +345,7 @@ func createGroupNode(sibling client.Object, siblingReady *metav1.Condition, obj if objReady != nil { objReady.LastTransitionTime = minLastTransitionTime(objReady, siblingReady) objReady.Message = "" - setReadyV1Beta2Condition(groupNode, objReady) + setReadyCondition(groupNode, objReady) if objReady.Status == metav1.ConditionTrue { // When creating a group, it is already the sum of obj and its own sibling, // and they all have same conditions. @@ -358,7 +358,7 @@ func createGroupNode(sibling client.Object, siblingReady *metav1.Condition, obj if objUpToDate != nil { objUpToDate.LastTransitionTime = metav1.Time{} objUpToDate.Message = "" - setUpToDateV1Beta2Condition(groupNode, objUpToDate) + setUpToDateCondition(groupNode, objUpToDate) if objUpToDate.Status == metav1.ConditionTrue { // When creating a group, it is already the sum of obj and its own sibling, // and they all have same conditions. 
@@ -370,7 +370,7 @@ func createGroupNode(sibling client.Object, siblingReady *metav1.Condition, obj } func readyStatusReasonUID(obj client.Object) string { - ready := GetReadyV1Beta2Condition(obj) + ready := GetReadyCondition(obj) if ready == nil { return fmt.Sprintf("zzz_%s", util.RandomString(6)) } @@ -417,7 +417,7 @@ func createV1Beta1GroupNode(sibling client.Object, siblingReady *clusterv1.Condi } func readyStatusSeverityAndReasonUID(obj client.Object) string { - ready := GetReadyCondition(obj) + ready := GetV1Beta1ReadyCondition(obj) if ready == nil { return fmt.Sprintf("zzz_%s", util.RandomString(6)) } @@ -459,7 +459,7 @@ func updateGroupNode(groupObj client.Object, groupReady *metav1.Condition, obj c if groupReady != nil { groupReady.LastTransitionTime = minLastTransitionTime(objReady, groupReady) groupReady.Message = "" - setReadyV1Beta2Condition(groupObj, groupReady) + setReadyCondition(groupObj, groupReady) } if objReady != nil && objReady.Status == metav1.ConditionTrue { diff --git a/cmd/clusterctl/client/tree/tree_test.go b/cmd/clusterctl/client/tree/tree_test.go index 058eaf4299de..2d4221a30557 100644 --- a/cmd/clusterctl/client/tree/tree_test.go +++ b/cmd/clusterctl/client/tree/tree_test.go @@ -565,7 +565,7 @@ func Test_createGroupNode(t *testing.T) { } g := NewWithT(t) - got := createGroupNode(sibling, GetReadyV1Beta2Condition(sibling), obj, GetAvailableV1Beta2Condition(obj), GetReadyV1Beta2Condition(obj), GetMachineUpToDateV1Beta2Condition(obj)) + got := createGroupNode(sibling, GetReadyCondition(sibling), obj, GetAvailableCondition(obj), GetReadyCondition(obj), GetMachineUpToDateCondition(obj)) // Some values are generated randomly, so pick up them. want.SetName(got.GetName()) @@ -653,7 +653,7 @@ func Test_createV1Beta1GroupNode(t *testing.T) { } g := NewWithT(t) - got := createV1Beta1GroupNode(sibling, GetReadyCondition(sibling), obj, GetReadyCondition(obj)) + got := createV1Beta1GroupNode(sibling, GetV1Beta1ReadyCondition(sibling), obj, GetV1Beta1ReadyCondition(obj)) // Some values are generated randomly, so pick up them. want.SetName(got.GetName()) @@ -738,7 +738,7 @@ func Test_updateGroupNode(t *testing.T) { } g := NewWithT(t) - updateGroupNode(group, GetReadyV1Beta2Condition(group), obj, GetAvailableV1Beta2Condition(obj), GetReadyV1Beta2Condition(obj), GetMachineUpToDateV1Beta2Condition(obj)) + updateGroupNode(group, GetReadyCondition(group), obj, GetAvailableCondition(obj), GetReadyCondition(obj), GetMachineUpToDateCondition(obj)) g.Expect(group).To(BeComparableTo(want)) } @@ -827,7 +827,7 @@ func Test_updateV1Beta1GroupNode(t *testing.T) { } g := NewWithT(t) - updateV1Beta1GroupNode(group, GetReadyCondition(group), obj, GetReadyCondition(obj)) + updateV1Beta1GroupNode(group, GetV1Beta1ReadyCondition(group), obj, GetV1Beta1ReadyCondition(obj)) g.Expect(group).To(BeComparableTo(want)) } diff --git a/cmd/clusterctl/client/tree/util.go b/cmd/clusterctl/client/tree/util.go index 6c95897132da..25b35436c553 100644 --- a/cmd/clusterctl/client/tree/util.go +++ b/cmd/clusterctl/client/tree/util.go @@ -35,8 +35,8 @@ import ( // GroupVersionVirtualObject is the group version for VirtualObject. var GroupVersionVirtualObject = schema.GroupVersion{Group: "virtual.cluster.x-k8s.io", Version: clusterv1.GroupVersion.Version} -// GetReadyV1Beta2Condition returns the ReadyCondition for an object, if defined. -func GetReadyV1Beta2Condition(obj client.Object) *metav1.Condition { +// GetReadyCondition returns the ReadyCondition for an object, if defined. 
+func GetReadyCondition(obj client.Object) *metav1.Condition { if getter, ok := obj.(conditions.Getter); ok { return conditions.Get(getter, clusterv1.ReadyCondition) } @@ -52,8 +52,8 @@ func GetReadyV1Beta2Condition(obj client.Object) *metav1.Condition { return nil } -// GetAvailableV1Beta2Condition returns the AvailableCondition for an object, if defined. -func GetAvailableV1Beta2Condition(obj client.Object) *metav1.Condition { +// GetAvailableCondition returns the AvailableCondition for an object, if defined. +func GetAvailableCondition(obj client.Object) *metav1.Condition { if getter, ok := obj.(conditions.Getter); ok { return conditions.Get(getter, clusterv1.AvailableCondition) } @@ -69,17 +69,17 @@ func GetAvailableV1Beta2Condition(obj client.Object) *metav1.Condition { return nil } -// GetMachineUpToDateV1Beta2Condition returns machine's UpToDate condition, if defined. +// GetMachineUpToDateCondition returns machine's UpToDate condition, if defined. // Note: The UpToDate condition only exist on machines, so no need to support reading from unstructured. -func GetMachineUpToDateV1Beta2Condition(obj client.Object) *metav1.Condition { +func GetMachineUpToDateCondition(obj client.Object) *metav1.Condition { if getter, ok := obj.(conditions.Getter); ok { return conditions.Get(getter, clusterv1.MachineUpToDateCondition) } return nil } -// GetReadyCondition returns the ReadyCondition for an object, if defined. -func GetReadyCondition(obj client.Object) *clusterv1.Condition { +// GetV1Beta1ReadyCondition returns the ReadyCondition for an object, if defined. +func GetV1Beta1ReadyCondition(obj client.Object) *clusterv1.Condition { getter := objToGetter(obj) if getter == nil { return nil @@ -87,8 +87,8 @@ func GetReadyCondition(obj client.Object) *clusterv1.Condition { return v1beta1conditions.Get(getter, clusterv1.ReadyV1Beta1Condition) } -// GetAllV1Beta2Conditions returns the other conditions (all the conditions except ready) for an object, if defined. -func GetAllV1Beta2Conditions(obj client.Object) []metav1.Condition { +// GetConditions returns conditions for an object, if defined. +func GetConditions(obj client.Object) []metav1.Condition { if getter, ok := obj.(conditions.Getter); ok { return getter.GetConditions() } @@ -104,8 +104,8 @@ func GetAllV1Beta2Conditions(obj client.Object) []metav1.Condition { return nil } -// GetOtherConditions returns the other conditions (all the conditions except ready) for an object, if defined. -func GetOtherConditions(obj client.Object) []*clusterv1.Condition { +// GetOtherV1Beta1Conditions returns the other conditions (all the conditions except ready) for an object, if defined. 
+func GetOtherV1Beta1Conditions(obj client.Object) []*clusterv1.Condition { getter := objToGetter(obj) if getter == nil { return nil @@ -122,19 +122,19 @@ func GetOtherConditions(obj client.Object) []*clusterv1.Condition { return conditions } -func setAvailableV1Beta2Condition(obj client.Object, available *metav1.Condition) { +func setAvailableCondition(obj client.Object, available *metav1.Condition) { if setter, ok := obj.(conditions.Setter); ok { conditions.Set(setter, *available) } } -func setReadyV1Beta2Condition(obj client.Object, ready *metav1.Condition) { +func setReadyCondition(obj client.Object, ready *metav1.Condition) { if setter, ok := obj.(conditions.Setter); ok { conditions.Set(setter, *ready) } } -func setUpToDateV1Beta2Condition(obj client.Object, upToDate *metav1.Condition) { +func setUpToDateCondition(obj client.Object, upToDate *metav1.Condition) { if setter, ok := obj.(conditions.Setter); ok { conditions.Set(setter, *upToDate) } diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index 4e4bc3c75a23..0f374efaf03a 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -49,6 +49,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" externalfake "sigs.k8s.io/cluster-api/controllers/external/fake" "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/cache" "sigs.k8s.io/cluster-api/util/conditions" @@ -1142,20 +1143,17 @@ func TestMachineV1Beta1Conditions(t *testing.T) { infraProvisioned: false, bootstrapDataSecretCreated: true, beforeFunc: func(_, infra *unstructured.Unstructured, _ *clusterv1.Machine) { - addConditionsToExternal(infra, clusterv1.Conditions{ - { - Type: clusterv1.ReadyV1Beta1Condition, - Status: corev1.ConditionFalse, - Severity: clusterv1.ConditionSeverityInfo, - Reason: "Custom reason", - }, + addConditionToExternal(infra, metav1.Condition{ + Type: contract.InfrastructureMachine().ReadyConditionType(), + Status: metav1.ConditionFalse, + Reason: "Custom reason", }) }, conditionsToAssert: []metav1.Condition{ {Type: clusterv1.MachineInfrastructureReadyCondition, Status: metav1.ConditionFalse, Reason: "Custom reason", Message: ""}, }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ - v1beta1conditions.FalseCondition(clusterv1.InfrastructureReadyV1Beta1Condition, "Custom reason", clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.FalseCondition(clusterv1.InfrastructureReadyV1Beta1Condition, "Custom reason", "", ""), }, }, { @@ -1176,20 +1174,17 @@ func TestMachineV1Beta1Conditions(t *testing.T) { infraProvisioned: true, bootstrapDataSecretCreated: false, beforeFunc: func(bootstrap, _ *unstructured.Unstructured, _ *clusterv1.Machine) { - addConditionsToExternal(bootstrap, clusterv1.Conditions{ - { - Type: clusterv1.ReadyV1Beta1Condition, - Status: corev1.ConditionFalse, - Severity: clusterv1.ConditionSeverityInfo, - Reason: "Custom reason", - }, + addConditionToExternal(bootstrap, metav1.Condition{ + Type: contract.Bootstrap().ReadyConditionType(), + Status: metav1.ConditionFalse, + Reason: "Custom reason", }) }, conditionsToAssert: []metav1.Condition{ {Type: clusterv1.MachineBootstrapConfigReadyCondition, Status: metav1.ConditionFalse, Reason: "Custom reason", Message: ""}, }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ - v1beta1conditions.FalseCondition(clusterv1.BootstrapReadyV1Beta1Condition, 
"Custom reason", clusterv1.ConditionSeverityInfo, ""), + v1beta1conditions.FalseCondition(clusterv1.BootstrapReadyV1Beta1Condition, "Custom reason", "", ""), }, }, { @@ -3574,13 +3569,15 @@ func TestNodeDeletionWithoutNodeRefFallback(t *testing.T) { } // adds a condition list to an external object. -func addConditionsToExternal(u *unstructured.Unstructured, newConditions clusterv1.Conditions) { - existingConditions := clusterv1.Conditions{} - if cs := v1beta1conditions.UnstructuredGetter(u).GetV1Beta1Conditions(); len(cs) != 0 { - existingConditions = cs - } - existingConditions = append(existingConditions, newConditions...) - v1beta1conditions.UnstructuredSetter(u).SetV1Beta1Conditions(existingConditions) +func addConditionToExternal(u *unstructured.Unstructured, c metav1.Condition) { + unstructured.SetNestedSlice(u.Object, []interface{}{ + map[string]interface{}{ + "type": c.Type, + "status": string(c.Status), + "reason": c.Reason, + "message": c.Message, + }, + }, "status", "conditions") } // asserts the conditions set on the Getter object. diff --git a/internal/util/tree/tree.go b/internal/util/tree/tree.go index eb267268a0a7..b9bd6e29e911 100644 --- a/internal/util/tree/tree.go +++ b/internal/util/tree/tree.go @@ -220,7 +220,7 @@ func orderChildrenObjects(childrenObj []ctrlclient.Object) []ctrlclient.Object { func addObjectRowV1Beta1(prefix string, tbl *tablewriter.Table, objectTree *tree.ObjectTree, obj ctrlclient.Object) { // Gets the descriptor for the object's ready condition, if any. readyDescriptor := v1beta1ConditionDescriptor{readyColor: gray} - if ready := tree.GetReadyCondition(obj); ready != nil { + if ready := tree.GetV1Beta1ReadyCondition(obj); ready != nil { readyDescriptor = newV1Beta1ConditionDescriptor(ready) } @@ -294,7 +294,7 @@ func addOtherConditions(prefix string, tbl *tablewriter.Table, objectTree *tree. 
clusterv1.RemediatingCondition, ) - conditions := tree.GetAllV1Beta2Conditions(obj) + conditions := tree.GetConditions(obj) for i := range conditions { condition := conditions[i] positivePolarity := true @@ -350,7 +350,7 @@ func addOtherConditionsV1Beta1(prefix string, tbl *tablewriter.Table, objectTree childrenPipe = pipe } - otherConditions := tree.GetOtherConditions(obj) + otherConditions := tree.GetOtherV1Beta1Conditions(obj) for i := range otherConditions { otherCondition := otherConditions[i] otherDescriptor := newV1Beta1ConditionDescriptor(otherCondition) @@ -549,7 +549,7 @@ func newRowDescriptor(obj ctrlclient.Object) rowDescriptor { v.upToDateCounters = fmt.Sprintf("%d", ptr.Deref(cp.UpToDateReplicas, 0)+ptr.Deref(w.UpToDateReplicas, 0)) } - if available := tree.GetAvailableV1Beta2Condition(obj); available != nil { + if available := tree.GetAvailableCondition(obj); available != nil { availableColor, availableStatus, availableAge, availableReason, availableMessage := conditionInfo(*available, true) v.status = availableColor.Sprintf("Available: %s", availableStatus) v.reason = availableReason @@ -572,7 +572,7 @@ func newRowDescriptor(obj ctrlclient.Object) rowDescriptor { v.upToDateCounters = fmt.Sprintf("%d", *obj.Status.UpToDateReplicas) } - if available := tree.GetAvailableV1Beta2Condition(obj); available != nil { + if available := tree.GetAvailableCondition(obj); available != nil { availableColor, availableStatus, availableAge, availableReason, availableMessage := conditionInfo(*available, true) v.status = availableColor.Sprintf("Available: %s", availableStatus) v.reason = availableReason @@ -602,14 +602,14 @@ func newRowDescriptor(obj ctrlclient.Object) rowDescriptor { v.replicas = "1" v.availableCounters = "0" - if available := tree.GetAvailableV1Beta2Condition(obj); available != nil { + if available := tree.GetAvailableCondition(obj); available != nil { if available.Status == metav1.ConditionTrue { v.availableCounters = "1" } } v.readyCounters = "0" - if ready := tree.GetReadyV1Beta2Condition(obj); ready != nil { + if ready := tree.GetReadyCondition(obj); ready != nil { readyColor, readyStatus, readyAge, readyReason, readyMessage := conditionInfo(*ready, true) v.status = readyColor.Sprintf("Ready: %s", readyStatus) v.reason = readyReason @@ -621,7 +621,7 @@ func newRowDescriptor(obj ctrlclient.Object) rowDescriptor { } v.upToDateCounters = "0" - if upToDate := tree.GetMachineUpToDateV1Beta2Condition(obj); upToDate != nil { + if upToDate := tree.GetMachineUpToDateCondition(obj); upToDate != nil { if upToDate.Status == metav1.ConditionTrue { v.upToDateCounters = "1" } @@ -632,7 +632,7 @@ func newRowDescriptor(obj ctrlclient.Object) rowDescriptor { // in case not all the conditions are visualized. // Also, if the Unstructured object implements the Cluster API control plane contract, surface // corresponding replica counters. 
- if ready := tree.GetReadyV1Beta2Condition(obj); ready != nil { + if ready := tree.GetReadyCondition(obj); ready != nil { readyColor, readyStatus, readyAge, readyReason, readyMessage := conditionInfo(*ready, true) v.status = readyColor.Sprintf("Ready: %s", readyStatus) v.reason = readyReason @@ -669,7 +669,7 @@ func newRowDescriptor(obj ctrlclient.Object) rowDescriptor { v.upToDateCounters = fmt.Sprintf("%d", tree.GetGroupItemsUpToDateCounter(obj)) } - if ready := tree.GetReadyV1Beta2Condition(obj); ready != nil { + if ready := tree.GetReadyCondition(obj); ready != nil { readyColor, readyStatus, readyAge, readyReason, readyMessage := conditionInfo(*ready, true) v.status = readyColor.Sprintf("Ready: %s", readyStatus) v.reason = readyReason diff --git a/util/collections/machine_filters.go b/util/collections/machine_filters.go index eeb37270429b..ad1a7f7d3709 100644 --- a/util/collections/machine_filters.go +++ b/util/collections/machine_filters.go @@ -28,6 +28,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) @@ -212,8 +213,7 @@ func IsReady() Func { if machine == nil { return false } - // TODO (v1beta2): test for v1beta2 conditions - return v1beta1conditions.IsTrue(machine, clusterv1.ReadyV1Beta1Condition) + return conditions.IsTrue(machine, clusterv1.MachineReadyCondition) } } diff --git a/util/patch/patch_test.go b/util/patch/patch_test.go index 00ea79401b0d..b4a6b511e47f 100644 --- a/util/patch/patch_test.go +++ b/util/patch/patch_test.go @@ -38,6 +38,7 @@ import ( ) func TestPatchHelper(t *testing.T) { + now := metav1.Now().Rfc3339Copy() ns, err := env.CreateNamespace(ctx, "test-patch-helper") if err != nil { t.Fatal(err) @@ -202,19 +203,19 @@ func TestPatchHelper(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) t.Log("Validating the object has been updated") - g.Eventually(func() clusterv1.Conditions { + g.Eventually(func() []metav1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { - return clusterv1.Conditions{} + return []metav1.Condition{} } - return objAfter.Status.Deprecated.V1Beta1.Conditions - }, timeout).Should(v1beta1conditions.MatchConditions(obj.Status.Deprecated.V1Beta1.Conditions)) + return objAfter.Status.Conditions + }, timeout).Should(conditions.MatchConditions(obj.Status.Conditions)) }) t.Run("should recover if there is a resolvable conflict", func(t *testing.T) { @@ -238,7 +239,7 @@ func TestPatchHelper(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking TestCondition=False") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") + conditions.Set(objCopy, metav1.Condition{Type: "TestCondition", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -249,7 +250,7 @@ func TestPatchHelper(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking 
Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -261,22 +262,22 @@ func TestPatchHelper(t *testing.T) { return false } - testConditionCopy := v1beta1conditions.Get(objCopy, "TestCondition") - testConditionAfter := v1beta1conditions.Get(objAfter, "TestCondition") + testConditionCopy := conditions.Get(objCopy, "TestCondition") + testConditionAfter := conditions.Get(objAfter, "TestCondition") if testConditionCopy == nil || testConditionAfter == nil { return false } - ok, err := v1beta1conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err := conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != nil || !ok { return false } - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBefore := conditions.Get(obj, "Ready") + readyAfter := conditions.Get(objAfter, "Ready") if readyBefore == nil || readyAfter == nil { return false } - ok, err = v1beta1conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } @@ -306,7 +307,7 @@ func TestPatchHelper(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking TestCondition=False") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("TestCondition"), "reason", clusterv1.ConditionSeverityInfo, "message") + conditions.Set(objCopy, metav1.Condition{Type: "TestCondition", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -321,7 +322,7 @@ func TestPatchHelper(t *testing.T) { obj.Spec.ControlPlaneEndpoint.Host = "test://endpoint" obj.Spec.ControlPlaneEndpoint.Port = 8443 obj.Status.Phase = "Provisioning" - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -333,22 +334,22 @@ func TestPatchHelper(t *testing.T) { return false } - testConditionCopy := v1beta1conditions.Get(objCopy, "TestCondition") - testConditionAfter := v1beta1conditions.Get(objAfter, "TestCondition") + testConditionCopy := conditions.Get(objCopy, "TestCondition") + testConditionAfter := conditions.Get(objAfter, "TestCondition") if testConditionCopy == nil || testConditionAfter == nil { return false } - ok, err := v1beta1conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) + ok, err := conditions.MatchCondition(*testConditionCopy).Match(*testConditionAfter) if err != nil || !ok { return false } - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBefore := conditions.Get(obj, "Ready") + readyAfter := conditions.Get(objAfter, "Ready") if readyBefore == nil || readyAfter == nil { return false } - ok, err = v1beta1conditions.MatchCondition(*readyBefore).Match(*readyAfter) + ok, err = conditions.MatchCondition(*readyBefore).Match(*readyAfter) if err != nil || !ok { return false } 
@@ -380,7 +381,7 @@ func TestPatchHelper(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking Ready=False") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -391,7 +392,7 @@ func TestPatchHelper(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).NotTo(Succeed()) @@ -403,8 +404,8 @@ func TestPatchHelper(t *testing.T) { return false } - for _, afterCondition := range objAfter.Status.Deprecated.V1Beta1.Conditions { - ok, err := v1beta1conditions.MatchCondition(objCopy.Status.Deprecated.V1Beta1.Conditions[0]).Match(afterCondition) + for _, afterCondition := range objAfter.Status.Conditions { + ok, err := conditions.MatchCondition(objCopy.Status.Conditions[0]).Match(afterCondition) if err == nil && ok { return true } @@ -435,7 +436,7 @@ func TestPatchHelper(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking Ready=False") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -446,21 +447,21 @@ func TestPatchHelper(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") - g.Expect(patcher.Patch(ctx, obj, WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyV1Beta1Condition}})).To(Succeed()) + g.Expect(patcher.Patch(ctx, obj, WithOwnedConditions{Conditions: []string{"Ready"}})).To(Succeed()) t.Log("Validating the object has been updated") - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - g.Eventually(func() clusterv1.Condition { + readyBefore := conditions.Get(obj, "Ready") + g.Eventually(func() metav1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { - return clusterv1.Condition{} + return metav1.Condition{} } - return *v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) - }, timeout).Should(v1beta1conditions.MatchCondition(*readyBefore)) + return *conditions.Get(objAfter, "Ready") + }, timeout).Should(conditions.MatchCondition(*readyBefore)) }) t.Run("should not return an error if there is an unresolvable conflict when force overwrite is enabled", func(t *testing.T) { @@ -484,7 +485,7 @@ func TestPatchHelper(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking Ready=False") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: 
metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -495,21 +496,21 @@ func TestPatchHelper(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithForceOverwriteConditions{})).To(Succeed()) t.Log("Validating the object has been updated") - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - g.Eventually(func() clusterv1.Condition { + readyBefore := conditions.Get(obj, "Ready") + g.Eventually(func() metav1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { - return clusterv1.Condition{} + return metav1.Condition{} } - return *v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) - }, timeout).Should(v1beta1conditions.MatchCondition(*readyBefore)) + return *conditions.Get(objAfter, "Ready") + }, timeout).Should(conditions.MatchCondition(*readyBefore)) }) }) }) @@ -722,7 +723,7 @@ func TestPatchHelper(t *testing.T) { } t.Log("Setting Ready condition") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -735,7 +736,7 @@ func TestPatchHelper(t *testing.T) { } return cmp.Equal(obj.Status.Initialization, objAfter.Status.Initialization) && - v1beta1conditions.IsTrue(objAfter, clusterv1.ReadyV1Beta1Condition) && + conditions.IsTrue(objAfter, "Ready") && cmp.Equal(obj.Spec, objAfter.Spec) }, timeout).Should(BeTrue()) }) @@ -1051,7 +1052,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -1090,7 +1091,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, Clusterv1ConditionsFieldPath{"status", "conditions"})).To(Succeed()) @@ -1137,7 +1138,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -1159,8 +1160,8 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ConditionType("Ready")) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) if readyBefore == nil || readyAfter == nil { return false 
} @@ -1207,7 +1208,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Changing the object spec, status, and adding Ready=True condition") obj.Spec.Foo = "foo" obj.Status.Bar = "bat" - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).To(Succeed()) @@ -1229,8 +1230,8 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ConditionType("Ready")) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) if readyBefore == nil || readyAfter == nil { return false } @@ -1265,7 +1266,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking Ready=False") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Ready"), "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1276,7 +1277,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).NotTo(Succeed()) @@ -1320,7 +1321,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking Ready=False") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Ready"), "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1331,20 +1332,20 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) t.Log("Patching the object") - g.Expect(patcher.Patch(ctx, obj, WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyV1Beta1Condition}})).To(Succeed()) + g.Expect(patcher.Patch(ctx, obj, WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{clusterv1.ConditionType("Ready")}})).To(Succeed()) t.Log("Validating the object has been updated") - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ConditionType("Ready")) g.Eventually(func() clusterv1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { return clusterv1.Condition{} } - return *v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + return *v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) }, timeout).Should(v1beta1conditions.MatchCondition(*readyBefore)) }) @@ -1369,7 +1370,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := 
obj.DeepCopy() t.Log("Marking Ready=False") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Ready"), "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1380,20 +1381,20 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithForceOverwriteConditions{})).To(Succeed()) t.Log("Validating the object has been updated") - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ConditionType("Ready")) g.Eventually(func() clusterv1.Condition { objAfter := obj.DeepCopy() if err := env.Get(ctx, key, objAfter); err != nil { return clusterv1.Condition{} } - return *v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + return *v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) }, timeout).Should(v1beta1conditions.MatchCondition(*readyBefore)) }) }) @@ -1431,7 +1432,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking clusterv1.conditions and metav1.conditions Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1496,7 +1497,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking clusterv1.conditions and metav1.conditions Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1552,7 +1553,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking clusterv1.conditions and metav1.conditions Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1575,8 +1576,8 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ConditionType("Ready")) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) if readyBefore == nil || readyAfter == nil { return false } @@ -1644,7 +1645,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Changing the object spec, status, and marking clusterv1.condition and metav1.conditions Ready=True") obj.Spec.Foo = "foo" obj.Status.Bar = "bat" - 
v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1668,8 +1669,8 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ConditionType("Ready")) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) if readyBefore == nil || readyAfter == nil { return false } @@ -1724,7 +1725,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition to be false") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Ready"), "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -1735,7 +1736,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).NotTo(Succeed()) @@ -1818,7 +1819,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready clusterv1.condition and metav1.conditions to be false") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Ready"), "reason", clusterv1.ConditionSeverityInfo, "message") conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -1830,11 +1831,11 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready clusterv1.condition and metav1.conditions True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") - g.Expect(patcher.Patch(ctx, obj, WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyV1Beta1Condition}}, WithOwnedConditions{Conditions: []string{"Ready"}})).To(Succeed()) + g.Expect(patcher.Patch(ctx, obj, WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{clusterv1.ConditionType("Ready")}}, WithOwnedConditions{Conditions: []string{"Ready"}})).To(Succeed()) t.Log("Validating the object has been updated") g.Eventually(func() bool { @@ -1843,8 +1844,8 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBefore := v1beta1conditions.Get(obj, 
clusterv1.ConditionType("Ready")) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) if readyBefore == nil || readyAfter == nil { return false } @@ -1888,7 +1889,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready clusterv1.condition and metav1.conditions to be false") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Ready"), "reason", clusterv1.ConditionSeverityInfo, "message") conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -1900,7 +1901,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready clusterv1.condition and metav1.conditions True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -1913,8 +1914,8 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBefore := v1beta1conditions.Get(obj, clusterv1.ConditionType("Ready")) + readyAfter := v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) if readyBefore == nil || readyAfter == nil { return false } @@ -1971,7 +1972,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition and back compatibility condition Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -2036,7 +2037,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition and back compatibility condition Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -2092,7 +2093,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking condition and back compatibility condition Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -2115,8 +2116,8 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBackCompatibilityBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBackCompatibilityBefore := v1beta1conditions.Get(obj, 
clusterv1.ConditionType("Ready")) + readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) if readyBackCompatibilityBefore == nil || readyBackCompatibilityAfter == nil { return false } @@ -2184,7 +2185,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { t.Log("Changing the object spec, status, and marking condition and back compatibility condition Ready=True") obj.Spec.Foo = "foo" obj.Status.Bar = "bat" - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -2208,8 +2209,8 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBackCompatibilityBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBackCompatibilityBefore := v1beta1conditions.Get(obj, clusterv1.ConditionType("Ready")) + readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) if readyBackCompatibilityBefore == nil || readyBackCompatibilityAfter == nil { return false } @@ -2264,7 +2265,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition to be false") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Ready"), "reason", clusterv1.ConditionSeverityInfo, "message") g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) t.Log("Validating that the local object's resource version is behind") @@ -2275,7 +2276,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready=True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj)).NotTo(Succeed()) @@ -2358,7 +2359,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition and back compatibility condition to be false") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Ready"), "reason", clusterv1.ConditionSeverityInfo, "message") conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -2370,11 +2371,11 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready condition and back compatibility condition True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") - g.Expect(patcher.Patch(ctx, obj, WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{clusterv1.ReadyV1Beta1Condition}}, WithOwnedConditions{Conditions: []string{"Ready"}})).To(Succeed()) + g.Expect(patcher.Patch(ctx, 
obj, WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{clusterv1.ConditionType("Ready")}}, WithOwnedConditions{Conditions: []string{"Ready"}})).To(Succeed()) t.Log("Validating the object has been updated") g.Eventually(func() bool { @@ -2383,8 +2384,8 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBackCompatibilityBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBackCompatibilityBefore := v1beta1conditions.Get(obj, clusterv1.ConditionType("Ready")) + readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) if readyBackCompatibilityBefore == nil || readyBackCompatibilityAfter == nil { return false } @@ -2428,7 +2429,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { objCopy := obj.DeepCopy() t.Log("Marking a Ready condition and back compatibility condition to be false") - v1beta1conditions.MarkFalse(objCopy, clusterv1.ReadyV1Beta1Condition, "reason", clusterv1.ConditionSeverityInfo, "message") + v1beta1conditions.MarkFalse(objCopy, clusterv1.ConditionType("Ready"), "reason", clusterv1.ConditionSeverityInfo, "message") conditions.Set(objCopy, metav1.Condition{Type: "Ready", Status: metav1.ConditionFalse, Reason: "NotGood", LastTransitionTime: now}) g.Expect(env.Status().Update(ctx, objCopy)).To(Succeed()) @@ -2440,7 +2441,7 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Marking Ready condition and back compatibility condition True") - v1beta1conditions.MarkTrue(obj, clusterv1.ReadyV1Beta1Condition) + v1beta1conditions.MarkTrue(obj, clusterv1.ConditionType("Ready")) conditions.Set(obj, metav1.Condition{Type: "Ready", Status: metav1.ConditionTrue, Reason: "AllGood", LastTransitionTime: now}) t.Log("Patching the object") @@ -2453,8 +2454,8 @@ func TestPatchHelperForV1beta2Transition(t *testing.T) { return false } - readyBackCompatibilityBefore := v1beta1conditions.Get(obj, clusterv1.ReadyV1Beta1Condition) - readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ReadyV1Beta1Condition) + readyBackCompatibilityBefore := v1beta1conditions.Get(obj, clusterv1.ConditionType("Ready")) + readyBackCompatibilityAfter := v1beta1conditions.Get(objAfter, clusterv1.ConditionType("Ready")) if readyBackCompatibilityBefore == nil || readyBackCompatibilityAfter == nil { return false } From 02e935991c0afb1ecdea9e78ea894eb9b6388cd5 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Tue, 22 Apr 2025 16:05:08 +0200 Subject: [PATCH 12/20] Stop using clusterv1.MachineOwnerRemediatedV1Beta1Condition and clusterv1.MachineHealthCheckSucceededV1Beta1Condition in controllers --- .../kubeadm/internal/control_plane_test.go | 57 ++++- .../internal/controllers/remediation_test.go | 60 ++--- .../internal/controllers/status_test.go | 25 +- .../controllers/cluster/cluster_controller.go | 8 +- .../cluster/cluster_controller_status.go | 42 ++-- .../cluster/cluster_controller_status_test.go | 238 +++++++++--------- .../cluster/cluster_controller_test.go | 2 +- .../machine/machine_controller_test.go | 5 +- .../machinedeployment_status_test.go | 79 +++--- .../machinehealthcheck_controller.go | 4 +- .../machinehealthcheck_controller_test.go | 14 +- .../machinehealthcheck_targets_test.go | 24 +- .../machineset/machineset_controller.go | 13 +- .../machineset_controller_status_test.go | 87 +++---- .../machineset/machineset_controller_test.go 
| 159 ------------ .../machineset/machineset_delete_policy.go | 6 +- .../machineset_delete_policy_test.go | 24 +- util/collections/machine_filters.go | 6 +- util/collections/machine_filters_test.go | 22 +- 19 files changed, 354 insertions(+), 521 deletions(-) diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index fe862ea95d9f..ef93ca97131a 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ b/controlplane/kubeadm/internal/control_plane_test.go @@ -30,7 +30,6 @@ import ( controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" "sigs.k8s.io/cluster-api/util/collections" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) func TestControlPlane(t *testing.T) { @@ -151,14 +150,32 @@ func TestHasMachinesToBeRemediated(t *testing.T) { healthyMachineNotProvisioned := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "healthyMachine1"}} // healthy machine (with MachineHealthCheckSucceded == true) healthyMachineProvisioned := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "healthyMachine2"}, Status: clusterv1.MachineStatus{NodeRef: &corev1.ObjectReference{Kind: "Node", Name: "node1"}}} - v1beta1conditions.MarkTrue(healthyMachineProvisioned, clusterv1.MachineHealthCheckSucceededV1Beta1Condition) + healthyMachineProvisioned.SetConditions([]metav1.Condition{ + { + Type: clusterv1.MachineHealthCheckSucceededCondition, + Status: metav1.ConditionTrue, + }, + }) // unhealthy machine NOT eligible for KCP remediation (with MachineHealthCheckSucceded == False, but without MachineOwnerRemediated condition) unhealthyMachineNOTOwnerRemediated := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineNOTOwnerRemediated"}, Status: clusterv1.MachineStatus{NodeRef: &corev1.ObjectReference{Kind: "Node", Name: "node2"}}} - v1beta1conditions.MarkFalse(unhealthyMachineNOTOwnerRemediated, clusterv1.MachineHealthCheckSucceededV1Beta1Condition, clusterv1.MachineHasFailureV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Something is wrong") + unhealthyMachineNOTOwnerRemediated.SetConditions([]metav1.Condition{ + { + Type: clusterv1.MachineHealthCheckSucceededCondition, + Status: metav1.ConditionFalse, + }, + }) // unhealthy machine eligible for KCP remediation (with MachineHealthCheckSucceded == False, with MachineOwnerRemediated condition) unhealthyMachineOwnerRemediated := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineOwnerRemediated"}, Status: clusterv1.MachineStatus{NodeRef: &corev1.ObjectReference{Kind: "Node", Name: "node3"}}} - v1beta1conditions.MarkFalse(unhealthyMachineOwnerRemediated, clusterv1.MachineHealthCheckSucceededV1Beta1Condition, clusterv1.MachineHasFailureV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Something is wrong") - v1beta1conditions.MarkFalse(unhealthyMachineOwnerRemediated, clusterv1.MachineOwnerRemediatedV1Beta1Condition, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") + unhealthyMachineOwnerRemediated.SetConditions([]metav1.Condition{ + { + Type: clusterv1.MachineHealthCheckSucceededCondition, + Status: metav1.ConditionFalse, + }, + { + Type: clusterv1.MachineOwnerRemediatedCondition, + Status: metav1.ConditionFalse, + }, + }) t.Run("One unhealthy machine to be remediated by KCP", func(t *testing.T) { c := ControlPlane{ @@ -219,14 +236,30 @@ func 
TestHasHealthyMachineStillProvisioning(t *testing.T) { // unhealthy machine (with MachineHealthCheckSucceded condition) still provisioning (without NodeRef) unhealthyMachineStillProvisioning1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineStillProvisioning1"}} - v1beta1conditions.MarkFalse(unhealthyMachineStillProvisioning1, clusterv1.MachineHealthCheckSucceededV1Beta1Condition, clusterv1.MachineHasFailureV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Something is wrong") - v1beta1conditions.MarkFalse(unhealthyMachineStillProvisioning1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") + unhealthyMachineStillProvisioning1.SetConditions([]metav1.Condition{ + { + Type: clusterv1.MachineHealthCheckSucceededCondition, + Status: metav1.ConditionFalse, + }, + { + Type: clusterv1.MachineOwnerRemediatedCondition, + Status: metav1.ConditionFalse, + }, + }) // unhealthy machine (with MachineHealthCheckSucceded condition) provisioned (with NodeRef) unhealthyMachineProvisioned1 := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "unhealthyMachineProvisioned1"}} unhealthyMachineProvisioned1.Status.NodeRef = &corev1.ObjectReference{} - v1beta1conditions.MarkFalse(unhealthyMachineProvisioned1, clusterv1.MachineHealthCheckSucceededV1Beta1Condition, clusterv1.MachineHasFailureV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Something is wrong") - v1beta1conditions.MarkFalse(unhealthyMachineProvisioned1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KCP should remediate this issue") + unhealthyMachineProvisioned1.SetConditions([]metav1.Condition{ + { + Type: clusterv1.MachineHealthCheckSucceededCondition, + Status: metav1.ConditionFalse, + }, + { + Type: clusterv1.MachineOwnerRemediatedCondition, + Status: metav1.ConditionFalse, + }, + }) t.Run("Healthy machine still provisioning", func(t *testing.T) { c := ControlPlane{ @@ -302,9 +335,9 @@ func TestStatusToLogKeyAndValues(t *testing.T) { machineMarkedForRemediation := healthyMachine.DeepCopy() machineMarkedForRemediation.Name = "marked-for-remediation" - machineMarkedForRemediation.Status.Deprecated.V1Beta1.Conditions = append(machineMarkedForRemediation.Status.Deprecated.V1Beta1.Conditions, - clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, Status: corev1.ConditionFalse}, - clusterv1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, Status: corev1.ConditionFalse}, + machineMarkedForRemediation.Status.Conditions = append(machineMarkedForRemediation.Status.Conditions, + metav1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: metav1.ConditionFalse}, + metav1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse}, ) g := NewWithT(t) diff --git a/controlplane/kubeadm/internal/controllers/remediation_test.go b/controlplane/kubeadm/internal/controllers/remediation_test.go index 4e3c60645a5a..fcb8571519c2 100644 --- a/controlplane/kubeadm/internal/controllers/remediation_test.go +++ b/controlplane/kubeadm/internal/controllers/remediation_test.go @@ -251,7 +251,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, 
clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Name}, m) g.Expect(err).ToNot(HaveOccurred()) @@ -323,7 +323,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate while waiting for a version upgrade to v1.20.1 to be propagated from Cluster.spec.topology") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane can't remediate while waiting for a version upgrade to v1.20.1 to be propagated from Cluster.spec.topology") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane can't remediate while waiting for a version upgrade to v1.20.1 to be propagated from Cluster.spec.topology") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -374,7 +374,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed 3 times (MaxRetry)") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedReason, "KubeadmControlPlane can't remediate this machine because the operation already failed 3 times (MaxRetry)") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedReason, "KubeadmControlPlane can't remediate this machine because the operation already failed 3 times (MaxRetry)") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -430,7 +430,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, 
metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -489,7 +489,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -542,7 +542,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -590,7 +590,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) assertMachineV1beta1Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") - assertMachineV1beta2Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedReason, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") + assertMachineCondition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedReason, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") g.Expect(env.Cleanup(ctx, m)).To(Succeed()) }) @@ -622,7 +622,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) assertMachineV1beta1Condition(ctx, g, m1, 
clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine deletion to complete before triggering remediation") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane waiting for control plane Machine deletion to complete before triggering remediation") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane waiting for control plane Machine deletion to complete before triggering remediation") g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) @@ -654,7 +654,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane waiting for control plane Machine provisioning to complete before triggering remediation") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane waiting for control plane Machine provisioning to complete before triggering remediation") g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) @@ -687,7 +687,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane waiting for control plane Machine provisioning to complete before triggering remediation") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredReason, "KubeadmControlPlane waiting for control plane Machine provisioning to complete before triggering remediation") g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) @@ -732,7 +732,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd 
loosing quorum") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedReason, "KubeadmControlPlane can't remediate this Machine because this could result in etcd loosing quorum") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedReason, "KubeadmControlPlane can't remediate this Machine because this could result in etcd loosing quorum") g.Expect(env.Cleanup(ctx, m1, m2, m3)).To(Succeed()) }) @@ -779,7 +779,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd loosing quorum") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedReason, "KubeadmControlPlane can't remediate this Machine because this could result in etcd loosing quorum") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedReason, "KubeadmControlPlane can't remediate this Machine because this could result in etcd loosing quorum") g.Expect(env.Cleanup(ctx, m1, m2, m3, m4, m5)).To(Succeed()) }) @@ -829,7 +829,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -882,7 +882,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -918,7 +918,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(i - 1)) 
assertMachineV1beta1Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, mi, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: mi.Namespace, Name: mi.Name}, mi) g.Expect(err).ToNot(HaveOccurred()) @@ -976,7 +976,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1031,7 +1031,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1086,7 +1086,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1142,7 +1142,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, 
clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1198,7 +1198,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1249,7 +1249,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationFailedV1Beta1Reason, clusterv1.ConditionSeverityWarning, "A control plane machine needs remediation, but there is no healthy machine to forward etcd leadership to. 
Skipping remediation") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedReason, + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedReason, "KubeadmControlPlane can't remediate this Machine because there is no healthy Machine to forward etcd leadership to") removeFinalizer(g, m1) @@ -1301,7 +1301,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1337,7 +1337,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(i - 4)) assertMachineV1beta1Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, mi, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: mi.Namespace, Name: mi.Name}, mi) g.Expect(err).ToNot(HaveOccurred()) @@ -1410,7 +1410,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1446,7 +1446,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(1)) assertMachineV1beta1Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, 
controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m2.Namespace, Name: m2.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1524,7 +1524,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineV1beta1Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m2.Namespace, Name: m2.Name}, m2) g.Expect(err).ToNot(HaveOccurred()) @@ -1561,7 +1561,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(1)) assertMachineV1beta1Condition(ctx, g, m3, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m3, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m3, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m3.Namespace, Name: m3.Name}, m3) g.Expect(err).ToNot(HaveOccurred()) @@ -1642,8 +1642,8 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { assertMachineV1beta1Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.RemediationInProgressV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") assertMachineV1beta1Condition(ctx, g, m3, clusterv1.MachineOwnerRemediatedV1Beta1Condition, corev1.ConditionFalse, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") - assertMachineV1beta2Condition(ctx, g, m3, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, clusterv1.MachineOwnerRemediatedWaitingForRemediationReason, "Waiting for remediation") + assertMachineCondition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, "Machine is deleting") + assertMachineCondition(ctx, g, m3, clusterv1.MachineOwnerRemediatedCondition, metav1.ConditionFalse, clusterv1.MachineOwnerRemediatedWaitingForRemediationReason, "Waiting for remediation") err = env.Get(ctx, client.ObjectKey{Namespace: m2.Namespace, Name: m2.Name}, m2) g.Expect(err).ToNot(HaveOccurred()) @@ -2232,7 +2232,7 
@@ func assertMachineV1beta1Condition(ctx context.Context, g *WithT, m *clusterv1.M }, 10*time.Second).Should(Succeed()) } -func assertMachineV1beta2Condition(ctx context.Context, g *WithT, m *clusterv1.Machine, t string, status metav1.ConditionStatus, reason string, message string) { +func assertMachineCondition(ctx context.Context, g *WithT, m *clusterv1.Machine, t string, status metav1.ConditionStatus, reason string, message string) { g.Eventually(func() error { if err := env.Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Name}, m); err != nil { return err diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index a3fa720487dc..7dba0f58d946 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -660,10 +660,9 @@ func Test_setMachinesReadyAndMachinesUpToDateConditions(t *testing.T) { } func Test_setRemediatingCondition(t *testing.T) { - healthCheckSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: corev1.ConditionTrue} - healthCheckNotSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: corev1.ConditionFalse} - ownerRemediated := clusterv1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, Status: corev1.ConditionFalse} - ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, Message: "Machine is deleting"} + healthCheckSucceeded := metav1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: metav1.ConditionTrue} + healthCheckNotSucceeded := metav1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: metav1.ConditionFalse} + ownerRemediated := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingReason, Message: "Machine is deleting"} tests := []struct { name string @@ -690,9 +689,9 @@ func Test_setRemediatingCondition(t *testing.T) { controlPlane: &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{}, Machines: collections.FromMachines( - &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m1"}, Status: clusterv1.MachineStatus{Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{Conditions: clusterv1.Conditions{healthCheckSucceeded}}}}}, // Healthy machine - &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m2"}, Status: clusterv1.MachineStatus{Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{Conditions: clusterv1.Conditions{healthCheckNotSucceeded}}}}}, // Unhealthy machine, not yet marked for remediation - &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m3"}, Status: clusterv1.MachineStatus{Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{Conditions: clusterv1.Conditions{healthCheckNotSucceeded, ownerRemediated}}}, Conditions: []metav1.Condition{ownerRemediatedV1Beta2}}}, + &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m1"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{healthCheckSucceeded}}}, // Healthy machine + &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m2"}, Status: clusterv1.MachineStatus{Conditions: 
[]metav1.Condition{healthCheckNotSucceeded}}}, // Unhealthy machine, not yet marked for remediation + &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m3"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{healthCheckNotSucceeded, ownerRemediated}}}, ), }, expectCondition: metav1.Condition{ @@ -707,9 +706,9 @@ func Test_setRemediatingCondition(t *testing.T) { controlPlane: &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{}, Machines: collections.FromMachines( - &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m1"}, Status: clusterv1.MachineStatus{Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{Conditions: clusterv1.Conditions{healthCheckSucceeded}}}}}, // Healthy machine - &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m2"}, Status: clusterv1.MachineStatus{Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{Conditions: clusterv1.Conditions{healthCheckNotSucceeded}}}}}, // Unhealthy machine, not yet marked for remediation - &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m3"}, Status: clusterv1.MachineStatus{Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{Conditions: clusterv1.Conditions{healthCheckSucceeded}}}}}, // Healthy machine + &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m1"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{healthCheckSucceeded}}}, // Healthy machine + &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m2"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{healthCheckNotSucceeded}}}, // Unhealthy machine, not yet marked for remediation + &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m3"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{healthCheckSucceeded}}}, // Healthy machine ), }, expectCondition: metav1.Condition{ @@ -724,9 +723,9 @@ func Test_setRemediatingCondition(t *testing.T) { controlPlane: &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{}, Machines: collections.FromMachines( - &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m1"}, Status: clusterv1.MachineStatus{Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{Conditions: clusterv1.Conditions{healthCheckNotSucceeded}}}}}, // Unhealthy machine, not yet marked for remediation - &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m2"}, Status: clusterv1.MachineStatus{Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{Conditions: clusterv1.Conditions{healthCheckNotSucceeded}}}}}, // Unhealthy machine, not yet marked for remediation - &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m3"}, Status: clusterv1.MachineStatus{Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{Conditions: clusterv1.Conditions{healthCheckSucceeded}}}}}, // Healthy machine + &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m1"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{healthCheckNotSucceeded}}}, // Unhealthy machine, not yet marked for remediation + &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m2"}, Status: clusterv1.MachineStatus{Conditions: []metav1.Condition{healthCheckNotSucceeded}}}, // Unhealthy machine, not yet marked for remediation + &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m3"}, Status: clusterv1.MachineStatus{Conditions: 
[]metav1.Condition{healthCheckSucceeded}}}, // Healthy machine ), }, expectCondition: metav1.Condition{ diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 8e86148e4817..1bfff868151c 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -154,7 +154,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retRes ct return ctrl.Result{}, err } - // Add finalizer first if not set to avoid the race condition between init and delete. + // Add finalizer first if not set to avoid the race condition between init and delete. if finalizerAdded, err := finalizers.EnsureFinalizer(ctx, r.Client, cluster, clusterv1.ClusterFinalizer); err != nil || finalizerAdded { return ctrl.Result{}, err } @@ -243,7 +243,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retRes ct // Handle normal reconciliation loop. if cluster.Spec.Topology != nil { if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.InfrastructureRef == nil { - // TODO: add a condition to surface this scenario + // TODO: add a condition to surface this scenario log.Info("Waiting for the topology to be generated") return ctrl.Result{}, nil } @@ -351,10 +351,10 @@ type scope struct { // getDescendantsSucceeded documents if getDescendants succeeded. getDescendantsSucceeded bool - // deletingReason is the reason that should be used when setting the Deleting condition. + // deletingReason is the reason that should be used when setting the Deleting condition. deletingReason string - // deletingMessage is the message that should be used when setting the Deleting condition. + // deletingMessage is the message that should be used when setting the Deleting condition. deletingMessage string } diff --git a/internal/controllers/cluster/cluster_controller_status.go b/internal/controllers/cluster/cluster_controller_status.go index 6ba6d30dbb22..b21c6ec395c5 100644 --- a/internal/controllers/cluster/cluster_controller_status.go +++ b/internal/controllers/cluster/cluster_controller_status.go @@ -56,7 +56,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, s *scope) error { // TODO: "expv1.MachinePoolList{}" below should be replaced through "s.descendants.machinePools" once replica counters // and Available, ScalingUp and ScalingDown conditions have been implemented for MachinePools. - // TODO: This should be removed once the UpToDate condition has been implemented for MachinePool Machines + // TODO: This should be removed once the UpToDate condition has been implemented for MachinePool Machines isMachinePoolMachine := func(machine *clusterv1.Machine) bool { _, isMachinePoolMachine := machine.Labels[clusterv1.MachinePoolNameLabel] return isMachinePoolMachine @@ -282,7 +282,7 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust return } - // In case condition has NoReasonReported and status true, we assume it is a v1beta1 condition + // In case condition has NoReasonReported and status true, we assume it is a v1beta1 condition // and replace the reason with something less confusing.
if ready.Reason == conditions.NoReasonReported && ready.Status == metav1.ConditionTrue { ready.Reason = clusterv1.ClusterInfrastructureReadyReason @@ -383,7 +383,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu return } - // In case condition has NoReasonReported and status true, we assume it is a v1beta1 condition + // In case condition has NoReasonReported and status true, we assume it is a v1beta1 condition // and replace the reason with something less confusing. if available.Reason == conditions.NoReasonReported && available.Status == metav1.ConditionTrue { available.Reason = clusterv1.ClusterControlPlaneAvailableReason @@ -640,9 +640,9 @@ func setWorkerMachinesReadyCondition(ctx context.Context, cluster *clusterv1.Clu } func setControlPlaneMachinesUpToDateCondition(ctx context.Context, cluster *clusterv1.Cluster, machines collections.Machines, getDescendantsSucceeded bool) { - // Only consider Machines that have an UpToDate condition or are older than 10s. - // This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created, - // because it can take a bit until the UpToDate condition is set on a new Machine. + // Only consider Machines that have an UpToDate condition or are older than 10s. + // This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created, + // because it can take a bit until the UpToDate condition is set on a new Machine. machines = machines.Filter(func(machine *clusterv1.Machine) bool { return conditions.Has(machine, clusterv1.MachineUpToDateCondition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second }) @@ -659,9 +659,9 @@ func setControlPlaneMachinesUpToDateCondition(ctx context.Context, cluster *clus } func setWorkerMachinesUpToDateCondition(ctx context.Context, cluster *clusterv1.Cluster, machines collections.Machines, getDescendantsSucceeded bool) { - // Only consider Machines that have an UpToDate condition or are older than 10s. - // This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created, - // because it can take a bit until the UpToDate condition is set on a new Machine. + // Only consider Machines that have an UpToDate condition or are older than 10s. + // This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created, + // because it can take a bit until the UpToDate condition is set on a new Machine. machines = machines.Filter(func(machine *clusterv1.Machine) bool { return conditions.Has(machine, clusterv1.MachineUpToDateCondition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second }) @@ -764,7 +764,7 @@ func setRemediatingCondition(ctx context.Context, cluster *clusterv1.Cluster, ma machinesToBeRemediated.UnsortedList(), clusterv1.MachineOwnerRemediatedCondition, conditions.TargetConditionType(clusterv1.ClusterRemediatingCondition), // Note: in case of the remediating conditions it is not required to use a CustomMergeStrategy/ComputeReasonFunc - // because we are considering only machinesToBeRemediated (and we can pin the reason when we set the condition). + // because we are considering only machinesToBeRemediated (and we can pin the reason when we set the condition).
) if err != nil { conditions.Set(cluster, metav1.Condition{ @@ -803,7 +803,7 @@ func setRollingOutCondition(ctx context.Context, cluster *clusterv1.Cluster, con ws := make([]aggregationWrapper, 0, len(machinePools.Items)+len(machineDeployments.Items)+1) if controlPlane != nil { - // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) + // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) // Note: this implies that it won't surface as "Conditions RollingOut not yet reported from ...". if c, err := conditions.UnstructuredGet(controlPlane, clusterv1.RollingOutCondition); err == nil && c != nil { ws = append(ws, aggregationWrapper{cp: controlPlane}) @@ -828,7 +828,7 @@ func setRollingOutCondition(ctx context.Context, cluster *clusterv1.Cluster, con rollingOutCondition, err := conditions.NewAggregateCondition( ws, clusterv1.RollingOutCondition, conditions.TargetConditionType(clusterv1.ClusterRollingOutCondition), - // Instruct aggregate to consider RollingOut condition with negative polarity. + // Instruct aggregate to consider RollingOut condition with negative polarity. conditions.NegativePolarityConditionTypes{clusterv1.RollingOutCondition}, // Using a custom merge strategy to override reasons applied during merge and to ensure merge // takes into account the fact the RollingOut has negative polarity. @@ -874,7 +874,7 @@ func setScalingUpCondition(ctx context.Context, cluster *clusterv1.Cluster, cont ws := make([]aggregationWrapper, 0, len(machinePools.Items)+len(machineDeployments.Items)+1) if controlPlane != nil { - // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) + // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) // Note: this implies that it won't surface as "Conditions ScalingUp not yet reported from ...". if c, err := conditions.UnstructuredGet(controlPlane, clusterv1.ScalingUpCondition); err == nil && c != nil { ws = append(ws, aggregationWrapper{cp: controlPlane}) @@ -905,7 +905,7 @@ func setScalingUpCondition(ctx context.Context, cluster *clusterv1.Cluster, cont scalingUpCondition, err := conditions.NewAggregateCondition( ws, clusterv1.ScalingUpCondition, conditions.TargetConditionType(clusterv1.ClusterScalingUpCondition), - // Instruct aggregate to consider ScalingUp condition with negative polarity. + // Instruct aggregate to consider ScalingUp condition with negative polarity. conditions.NegativePolarityConditionTypes{clusterv1.ScalingUpCondition}, // Using a custom merge strategy to override reasons applied during merge and to ensure merge // takes into account the fact the ScalingUp has negative polarity. @@ -951,7 +951,7 @@ func setScalingDownCondition(ctx context.Context, cluster *clusterv1.Cluster, co ws := make([]aggregationWrapper, 0, len(machinePools.Items)+len(machineDeployments.Items)+1) if controlPlane != nil { - // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) + // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) // Note: this implies that it won't surface as "Conditions ScalingDown not yet reported from ...". 
if c, err := conditions.UnstructuredGet(controlPlane, clusterv1.ScalingDownCondition); err == nil && c != nil { ws = append(ws, aggregationWrapper{cp: controlPlane}) @@ -982,7 +982,7 @@ func setScalingDownCondition(ctx context.Context, cluster *clusterv1.Cluster, co scalingDownCondition, err := conditions.NewAggregateCondition( ws, clusterv1.ScalingDownCondition, conditions.TargetConditionType(clusterv1.ClusterScalingDownCondition), - // Instruct aggregate to consider ScalingDown condition with negative polarity. + // Instruct aggregate to consider ScalingDown condition with negative polarity. conditions.NegativePolarityConditionTypes{clusterv1.ScalingDownCondition}, // Using a custom merge strategy to override reasons applied during merge and to ensure merge // takes into account the fact the ScalingDown has negative polarity. @@ -1048,7 +1048,7 @@ func (c clusterConditionCustomMergeStrategy) Merge(operation conditions.MergeOpe } } - // Treat all reasons except TopologyReconcileFailed and ClusterClassNotReconciled of TopologyReconciled condition as info. + // Treat all reasons except TopologyReconcileFailed and ClusterClassNotReconciled of TopologyReconciled condition as info. if condition.Type == clusterv1.ClusterTopologyReconciledCondition && condition.Status == metav1.ConditionFalse && condition.Reason != clusterv1.ClusterTopologyReconciledFailedReason && condition.Reason != clusterv1.ClusterTopologyReconciledClusterClassNotReconciledReason { return conditions.InfoMergePriority @@ -1088,14 +1088,14 @@ func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster, clus summaryOpts := []conditions.SummaryOption{ forConditionTypes, - // Instruct summary to consider Deleting condition with negative polarity. + // Instruct summary to consider Deleting condition with negative polarity. conditions.NegativePolarityConditionTypes{clusterv1.ClusterDeletingCondition}, // Using a custom merge strategy to override reasons applied during merge and to ignore some - // info message so the available condition is less noisy. + // info message so the available condition is less noisy. conditions.CustomMergeStrategy{ MergeStrategy: clusterConditionCustomMergeStrategy{ cluster: cluster, - // Instruct merge to consider Deleting condition with negative polarity, + // Instruct merge to consider Deleting condition with negative polarity, negativePolarityConditionTypes: negativePolarityConditionTypes, }, }, @@ -1109,7 +1109,7 @@ func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster, clus if err != nil { // Note, this could only happen if we hit edge cases in computing the summary, which should not happen due to the fact // that we are passing a non empty list of ForConditionTypes.
- log.Error(err, "Failed to set Available condition") + log.Error(err, "Failed to set Available condition") availableCondition = &metav1.Condition{ Type: clusterv1.ClusterAvailableCondition, Status: metav1.ConditionUnknown, diff --git a/internal/controllers/cluster/cluster_controller_status_test.go b/internal/controllers/cluster/cluster_controller_status_test.go index 88fb1e0fd29c..0b98fc0c073c 100644 --- a/internal/controllers/cluster/cluster_controller_status_test.go +++ b/internal/controllers/cluster/cluster_controller_status_test.go @@ -89,9 +89,9 @@ func TestSetControlPlaneReplicas(t *testing.T) { cluster: fakeCluster("c"), getDescendantsSucceeded: true, machines: collections.FromMachines( - fakeMachine("cp1", v1beta2Condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionTrue}, v1beta2Condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionTrue}, v1beta2Condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue}), - fakeMachine("cp2", v1beta2Condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionFalse}, v1beta2Condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionTrue}, v1beta2Condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue}), - fakeMachine("cp3", v1beta2Condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionFalse}, v1beta2Condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse}, v1beta2Condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue}), + fakeMachine("cp1", condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionTrue}, condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionTrue}, condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue}), + fakeMachine("cp2", condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionFalse}, condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionTrue}, condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue}), + fakeMachine("cp3", condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionFalse}, condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse}, condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue}), fakeMachine("cp4"), fakeMachine("cp5"), ), @@ -176,9 +176,9 @@ func TestSetWorkersReplicas(t *testing.T) { *fakeMachineSet("ms2", desiredReplicas(31), currentReplicas(32), v1beta2ReadyReplicas(33), v1beta2AvailableReplicas(34), v1beta2UpToDateReplicas(35)), // not owned by the cluster }}, workerMachines: collections.FromMachines( // 4 replicas, 2 Ready, 3 Available, 1 UpToDate - fakeMachine("m1", OwnedByCluster("c"), v1beta2Condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionTrue}, v1beta2Condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionTrue}, v1beta2Condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue}), - fakeMachine("m2", OwnedByCluster("c"), v1beta2Condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionTrue}, v1beta2Condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse}, v1beta2Condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse}), - fakeMachine("m3", OwnedByCluster("c"), v1beta2Condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionTrue}, v1beta2Condition{Type: 
clusterv1.MachineReadyCondition, Status: metav1.ConditionTrue}, v1beta2Condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse}), + fakeMachine("m1", OwnedByCluster("c"), condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionTrue}, condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionTrue}, condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue}), + fakeMachine("m2", OwnedByCluster("c"), condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionTrue}, condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse}, condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse}), + fakeMachine("m3", OwnedByCluster("c"), condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionTrue}, condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionTrue}, condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse}), fakeMachine("m4", OwnedByCluster("c")), fakeMachine("m5"), // not owned by the cluster ), @@ -238,7 +238,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { }, }, { - name: "mirror Ready condition from infra cluster", + name: "mirror Ready condition from infra cluster", cluster: fakeCluster("c", infrastructureRef{Kind: "FakeInfraCluster"}), infraCluster: fakeInfraCluster("i1", condition{Type: "Ready", Status: "False", Message: "some message", Reason: "SomeReason"}), infraClusterIsNotFound: false, @@ -250,7 +250,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { }, }, { - name: "mirror Ready condition from infra cluster (true)", + name: "mirror Ready condition from infra cluster (true)", cluster: fakeCluster("c", infrastructureRef{Kind: "FakeInfraCluster"}), infraCluster: fakeInfraCluster("i1", condition{Type: "Ready", Status: "True", Message: "some message"}), // reason not set for v1beta1 conditions infraClusterIsNotFound: false, @@ -262,7 +262,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { }, }, { - name: "Use status.InfrastructureReady flag as a fallback Ready condition from infra cluster is missing", + name: "Use status.InfrastructureReady flag as a fallback Ready condition from infra cluster is missing", cluster: fakeCluster("c", infrastructureRef{Kind: "FakeInfraCluster"}), infraCluster: fakeInfraCluster("i1", provisioned(false)), infraClusterIsNotFound: false, @@ -274,7 +274,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { }, }, { - name: "Use status.InfrastructureReady flag as a fallback Ready condition from infra cluster is missing (ready true)", + name: "Use status.InfrastructureReady flag as a fallback Ready condition from infra cluster is missing (ready true)", cluster: fakeCluster("c", infrastructureRef{Kind: "FakeInfraCluster"}, infrastructureProvisioned(true)), infraCluster: fakeInfraCluster("i1", provisioned(true)), infraClusterIsNotFound: false, @@ -285,7 +285,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { }, }, { - name: "invalid Ready condition from infra cluster", + name: "invalid Ready condition from infra cluster", cluster: fakeCluster("c", infrastructureRef{Kind: "FakeInfraCluster"}), infraCluster: fakeInfraCluster("i1", condition{Type: "Ready"}), infraClusterIsNotFound: false, @@ -403,7 +403,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { }, }, { - name: "mirror Available condition from control plane", + name: "mirror Available condition 
from control plane", cluster: fakeCluster("c", controlPlaneRef{Kind: "FakeControlPlane"}), controlPlane: fakeControlPlane("cp1", condition{Type: "Available", Status: "False", Message: "some message", Reason: "SomeReason"}), controlPlaneIsNotFound: false, @@ -415,7 +415,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { }, }, { - name: "mirror Available condition from control plane (true)", + name: "mirror Available v1beta1Condition from control plane (true)", cluster: fakeCluster("c", controlPlaneRef{Kind: "FakeControlPlane"}), controlPlane: fakeControlPlane("cp1", condition{Type: "Available", Status: "True", Message: "some message"}), // reason not set for v1beta1 conditions controlPlaneIsNotFound: false, @@ -427,7 +427,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { }, }, { - name: "Use status.controlPlaneReady flag as a fallback Available condition from control plane is missing", + name: "Use status.controlPlaneReady flag as a fallback Available v1beta1Condition from control plane is missing", cluster: fakeCluster("c", controlPlaneRef{Kind: "FakeControlPlane"}), controlPlane: fakeControlPlane("cp1", initialized(false)), controlPlaneIsNotFound: false, @@ -439,7 +439,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { }, }, { - name: "Use status.controlPlaneReady flag as a fallback Available condition from control plane is missing (ready true)", + name: "Use status.controlPlaneReady flag as a fallback Available v1beta1Condition from control plane is missing (ready true)", cluster: fakeCluster("c", controlPlaneRef{Kind: "FakeControlPlane"}, controlPlaneInitialized(true)), controlPlane: fakeControlPlane("cp1", initialized(true)), controlPlaneIsNotFound: false, @@ -450,7 +450,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { }, }, { - name: "invalid Available condition from control plane", + name: "invalid Available v1beta1Condition from control plane", cluster: fakeCluster("c", controlPlaneRef{Kind: "FakeControlPlane"}), controlPlane: fakeControlPlane("cp1", condition{Type: "Available"}), controlPlaneIsNotFound: false, @@ -651,7 +651,7 @@ func TestSetControlPlaneInitialized(t *testing.T) { }, { name: "initialized never flips back to false", - cluster: fakeCluster("c", controlPlaneRef{Kind: "FakeControlPlane"}, v1beta2Condition{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterControlPlaneInitializedReason}), + cluster: fakeCluster("c", controlPlaneRef{Kind: "FakeControlPlane"}, condition{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterControlPlaneInitializedReason}), controlPlane: fakeControlPlane("cp", initialized(false)), expectCondition: metav1.Condition{ Type: clusterv1.ClusterControlPlaneInitializedCondition, @@ -725,7 +725,7 @@ func TestSetWorkersAvailableCondition(t *testing.T) { name: "descendants report available", cluster: fakeCluster("c", controlPlaneRef{}), machinePools: expv1.MachinePoolList{Items: []expv1.MachinePool{ - *fakeMachinePool("mp1", v1beta2Condition{ + *fakeMachinePool("mp1", condition{ Type: clusterv1.MachineDeploymentAvailableCondition, Status: metav1.ConditionFalse, Reason: "Foo", @@ -733,7 +733,7 @@ func TestSetWorkersAvailableCondition(t *testing.T) { }), }}, machineDeployments: clusterv1.MachineDeploymentList{Items: []clusterv1.MachineDeployment{ - *fakeMachineDeployment("md1", v1beta2Condition{ + *fakeMachineDeployment("md1", condition{ Type: 
clusterv1.MachineDeploymentAvailableCondition, Status: metav1.ConditionFalse, Reason: "Foo", @@ -804,8 +804,8 @@ func TestSetControlPlaneMachinesReadyCondition(t *testing.T) { name: "all machines are ready", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("machine-1", controlPlane(true), v1beta2Condition(readyCondition)), - fakeMachine("machine-2", controlPlane(true), v1beta2Condition(readyCondition)), + fakeMachine("machine-1", controlPlane(true), condition(readyCondition)), + fakeMachine("machine-2", controlPlane(true), condition(readyCondition)), }, getDescendantsSucceeded: true, expectCondition: metav1.Condition{ @@ -818,7 +818,7 @@ func TestSetControlPlaneMachinesReadyCondition(t *testing.T) { name: "one ready, one has nothing reported", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("machine-1", controlPlane(true), v1beta2Condition(readyCondition)), + fakeMachine("machine-1", controlPlane(true), condition(readyCondition)), fakeMachine("machine-2", controlPlane(true)), }, getDescendantsSucceeded: true, @@ -833,20 +833,20 @@ func TestSetControlPlaneMachinesReadyCondition(t *testing.T) { name: "one ready, one reporting not ready, one reporting unknown, one reporting deleting", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("machine-1", controlPlane(true), v1beta2Condition(readyCondition)), - fakeMachine("machine-2", controlPlane(true), v1beta2Condition(metav1.Condition{ + fakeMachine("machine-1", controlPlane(true), condition(readyCondition)), + fakeMachine("machine-2", controlPlane(true), condition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: "SomeReason", Message: "HealthCheckSucceeded: Some message", })), - fakeMachine("machine-3", controlPlane(true), v1beta2Condition(metav1.Condition{ + fakeMachine("machine-3", controlPlane(true), condition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionUnknown, Reason: "SomeUnknownReason", Message: "Some unknown message", })), - fakeMachine("machine-4", controlPlane(true), v1beta2Condition(metav1.Condition{ + fakeMachine("machine-4", controlPlane(true), condition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineDeletingReason, @@ -922,8 +922,8 @@ func TestSetWorkerMachinesReadyCondition(t *testing.T) { name: "all machines are ready", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("machine-1", v1beta2Condition(readyCondition)), - fakeMachine("machine-2", v1beta2Condition(readyCondition)), + fakeMachine("machine-1", condition(readyCondition)), + fakeMachine("machine-2", condition(readyCondition)), }, getDescendantsSucceeded: true, expectCondition: metav1.Condition{ @@ -936,7 +936,7 @@ func TestSetWorkerMachinesReadyCondition(t *testing.T) { name: "one ready, one has nothing reported", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("machine-1", v1beta2Condition(readyCondition)), + fakeMachine("machine-1", condition(readyCondition)), fakeMachine("machine-2"), }, getDescendantsSucceeded: true, @@ -951,20 +951,20 @@ func TestSetWorkerMachinesReadyCondition(t *testing.T) { name: "one ready, one reporting not ready, one reporting unknown, one reporting deleting", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("machine-1", v1beta2Condition(readyCondition)), - fakeMachine("machine-2", v1beta2Condition(metav1.Condition{ + fakeMachine("machine-1", 
condition(readyCondition)), + fakeMachine("machine-2", condition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: "SomeReason", Message: "HealthCheckSucceeded: Some message", })), - fakeMachine("machine-3", v1beta2Condition(metav1.Condition{ + fakeMachine("machine-3", condition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionUnknown, Reason: "SomeUnknownReason", Message: "Some unknown message", })), - fakeMachine("machine-4", v1beta2Condition(metav1.Condition{ + fakeMachine("machine-4", condition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineDeletingReason, @@ -1035,7 +1035,7 @@ func TestSetControlPlaneMachinesUpToDateCondition(t *testing.T) { name: "One machine up-to-date", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("up-to-date-1", controlPlane(true), v1beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-1", controlPlane(true), condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "some-reason-1", @@ -1053,7 +1053,7 @@ func TestSetControlPlaneMachinesUpToDateCondition(t *testing.T) { name: "One machine unknown", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("unknown-1", controlPlane(true), v1beta2Condition(metav1.Condition{ + fakeMachine("unknown-1", controlPlane(true), condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionUnknown, Reason: "some-unknown-reason-1", @@ -1072,7 +1072,7 @@ func TestSetControlPlaneMachinesUpToDateCondition(t *testing.T) { name: "One machine not up-to-date", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("not-up-to-date-machine-1", controlPlane(true), v1beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-1", controlPlane(true), condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "some-not-up-to-date-reason", @@ -1088,48 +1088,48 @@ func TestSetControlPlaneMachinesUpToDateCondition(t *testing.T) { }, }, { - name: "One machine without up-to-date condition, one new Machines without up-to-date condition", + name: "One machine without up-to-date condition, one new Machines without up-to-date condition", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("no-condition-machine-1", controlPlane(true)), - fakeMachine("no-condition-machine-2-new", controlPlane(true), creationTimestamp{Time: time.Now().Add(-5 * time.Second)}), // ignored because it's new + fakeMachine("no-condition-machine-1", controlPlane(true)), + fakeMachine("no-condition-machine-2-new", controlPlane(true), creationTimestamp{Time: time.Now().Add(-5 * time.Second)}), // ignored because it's new }, getDescendantsSucceeded: true, expectCondition: metav1.Condition{ Type: clusterv1.ClusterControlPlaneMachinesUpToDateCondition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterControlPlaneMachinesUpToDateUnknownReason, - Message: "* Machine no-condition-machine-1: Condition UpToDate not yet reported", + Message: "* Machine no-condition-machine-1: Condition UpToDate not yet reported", }, }, { name: "Two machines not up-to-date, two up-to-date, two not reported", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("up-to-date-1", controlPlane(true), v1beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-1", 
controlPlane(true), condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "TestUpToDate", })), - fakeMachine("up-to-date-2", controlPlane(true), v1beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-2", controlPlane(true), condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "TestUpToDate", })), - fakeMachine("not-up-to-date-machine-1", controlPlane(true), v1beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-1", controlPlane(true), condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "TestNotUpToDate", Message: "This is not up-to-date message", })), - fakeMachine("not-up-to-date-machine-2", controlPlane(true), v1beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-2", controlPlane(true), condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "TestNotUpToDate", Message: "This is not up-to-date message", })), - fakeMachine("no-condition-machine-1", controlPlane(true)), - fakeMachine("no-condition-machine-2", controlPlane(true)), + fakeMachine("no-condition-machine-1", controlPlane(true)), + fakeMachine("no-condition-machine-2", controlPlane(true)), }, getDescendantsSucceeded: true, expectCondition: metav1.Condition{ @@ -1137,7 +1137,7 @@ func TestSetControlPlaneMachinesUpToDateCondition(t *testing.T) { Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneMachinesNotUpToDateReason, Message: "* Machines not-up-to-date-machine-1, not-up-to-date-machine-2: This is not up-to-date message\n" + - "* Machines no-condition-machine-1, no-condition-machine-2: Condition UpToDate not yet reported", + "* Machines no-condition-machine-1, no-condition-machine-2: Condition UpToDate not yet reported", }, }, } @@ -1193,7 +1193,7 @@ func TestSetWorkerMachinesUpToDateCondition(t *testing.T) { name: "One machine up-to-date", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("up-to-date-1", v1beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-1", condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "some-reason-1", @@ -1211,7 +1211,7 @@ func TestSetWorkerMachinesUpToDateCondition(t *testing.T) { name: "One machine unknown", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("unknown-1", v1beta2Condition(metav1.Condition{ + fakeMachine("unknown-1", condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionUnknown, Reason: "some-unknown-reason-1", @@ -1230,7 +1230,7 @@ func TestSetWorkerMachinesUpToDateCondition(t *testing.T) { name: "One machine not up-to-date", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("not-up-to-date-machine-1", v1beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-1", condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "some-not-up-to-date-reason", @@ -1246,48 +1246,48 @@ func TestSetWorkerMachinesUpToDateCondition(t *testing.T) { }, }, { - name: "One machine without up-to-date condition, one new Machines without up-to-date condition", + name: "One machine without up-to-date condition, one new Machines without up-to-date condition", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("no-condition-machine-1"), - 
fakeMachine("no-condition-machine-2-new", creationTimestamp{Time: time.Now().Add(-5 * time.Second)}), // ignored because it's new + fakeMachine("no-v1beta1Condition-machine-1"), + fakeMachine("no-v1beta1Condition-machine-2-new", creationTimestamp{Time: time.Now().Add(-5 * time.Second)}), // ignored because it's new }, getDescendantsSucceeded: true, expectCondition: metav1.Condition{ Type: clusterv1.ClusterWorkerMachinesUpToDateCondition, Status: metav1.ConditionUnknown, Reason: clusterv1.ClusterWorkerMachinesUpToDateUnknownReason, - Message: "* Machine no-condition-machine-1: Condition UpToDate not yet reported", + Message: "* Machine no-v1beta1Condition-machine-1: Condition UpToDate not yet reported", }, }, { name: "Two machines not up-to-date, two up-to-date, two not reported", cluster: fakeCluster("c"), machines: []*clusterv1.Machine{ - fakeMachine("up-to-date-1", v1beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-1", condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "TestUpToDate", })), - fakeMachine("up-to-date-2", v1beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-2", condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "TestUpToDate", })), - fakeMachine("not-up-to-date-machine-1", v1beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-1", condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "TestNotUpToDate", Message: "This is not up-to-date message", })), - fakeMachine("not-up-to-date-machine-2", v1beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-2", condition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "TestNotUpToDate", Message: "This is not up-to-date message", })), - fakeMachine("no-condition-machine-1"), - fakeMachine("no-condition-machine-2"), + fakeMachine("no-v1beta1Condition-machine-1"), + fakeMachine("no-v1beta1Condition-machine-2"), }, getDescendantsSucceeded: true, expectCondition: metav1.Condition{ @@ -1295,7 +1295,7 @@ func TestSetWorkerMachinesUpToDateCondition(t *testing.T) { Status: metav1.ConditionFalse, Reason: clusterv1.ClusterWorkerMachinesNotUpToDateReason, Message: "* Machines not-up-to-date-machine-1, not-up-to-date-machine-2: This is not up-to-date message\n" + - "* Machines no-condition-machine-1, no-condition-machine-2: Condition UpToDate not yet reported", + "* Machines no-v1beta1Condition-machine-1, no-v1beta1Condition-machine-2: Condition UpToDate not yet reported", }, }, } @@ -1388,12 +1388,12 @@ func TestSetRollingOutCondition(t *testing.T) { cluster: fakeCluster("c", controlPlaneRef{}), controlPlane: fakeControlPlane("cp1", condition{ Type: clusterv1.ClusterRollingOutCondition, - Status: corev1.ConditionTrue, + Status: metav1.ConditionTrue, Reason: clusterv1.RollingOutReason, Message: "Rolling out 3 not up-to-date replicas", }), machinePools: expv1.MachinePoolList{Items: []expv1.MachinePool{ - *fakeMachinePool("mp1", v1beta2Condition{ + *fakeMachinePool("mp1", condition{ Type: clusterv1.RollingOutCondition, Status: metav1.ConditionTrue, Reason: clusterv1.RollingOutReason, @@ -1401,7 +1401,7 @@ func TestSetRollingOutCondition(t *testing.T) { }), }}, machineDeployments: clusterv1.MachineDeploymentList{Items: []clusterv1.MachineDeployment{ - *fakeMachineDeployment("md1", v1beta2Condition{ + *fakeMachineDeployment("md1", condition{ Type: 
clusterv1.MachineDeploymentRollingOutCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentRollingOutReason, @@ -1423,7 +1423,7 @@ func TestSetRollingOutCondition(t *testing.T) { cluster: fakeCluster("c", controlPlaneRef{}), controlPlane: fakeControlPlane("cp1"), machinePools: expv1.MachinePoolList{Items: []expv1.MachinePool{ - *fakeMachinePool("mp1", v1beta2Condition{ + *fakeMachinePool("mp1", condition{ Type: clusterv1.RollingOutCondition, Status: metav1.ConditionTrue, Reason: clusterv1.RollingOutReason, @@ -1431,7 +1431,7 @@ func TestSetRollingOutCondition(t *testing.T) { }), }}, machineDeployments: clusterv1.MachineDeploymentList{Items: []clusterv1.MachineDeployment{ - *fakeMachineDeployment("md1", v1beta2Condition{ + *fakeMachineDeployment("md1", condition{ Type: clusterv1.MachineDeploymentRollingOutCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentRollingOutReason, @@ -1473,7 +1473,7 @@ func TestSetRollingOutCondition(t *testing.T) { name: "cluster without controlplane, descendants report rolling out", cluster: fakeCluster("c"), machinePools: expv1.MachinePoolList{Items: []expv1.MachinePool{ - *fakeMachinePool("mp1", v1beta2Condition{ + *fakeMachinePool("mp1", condition{ Type: clusterv1.RollingOutCondition, Status: metav1.ConditionTrue, Reason: clusterv1.RollingOutReason, @@ -1481,7 +1481,7 @@ func TestSetRollingOutCondition(t *testing.T) { }), }}, machineDeployments: clusterv1.MachineDeploymentList{Items: []clusterv1.MachineDeployment{ - *fakeMachineDeployment("md1", v1beta2Condition{ + *fakeMachineDeployment("md1", condition{ Type: clusterv1.MachineDeploymentRollingOutCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentRollingOutReason, @@ -1572,7 +1572,7 @@ func TestSetScalingUpCondition(t *testing.T) { }}, machineSets: clusterv1.MachineSetList{Items: []clusterv1.MachineSet{ *fakeMachineSet("ms1", OwnedByCluster("c")), - *fakeMachineSet("ms2", v1beta2Condition{ + *fakeMachineSet("ms2", condition{ Type: clusterv1.ClusterScalingUpCondition, Status: metav1.ConditionFalse, Reason: "Foo", @@ -1594,12 +1594,12 @@ func TestSetScalingUpCondition(t *testing.T) { cluster: fakeCluster("c", controlPlaneRef{}), controlPlane: fakeControlPlane("cp1", condition{ Type: clusterv1.ClusterScalingUpCondition, - Status: corev1.ConditionTrue, + Status: metav1.ConditionTrue, Reason: clusterv1.ScalingUpReason, Message: "Scaling up from 0 to 3 replicas", }), machinePools: expv1.MachinePoolList{Items: []expv1.MachinePool{ - *fakeMachinePool("mp1", v1beta2Condition{ + *fakeMachinePool("mp1", condition{ Type: clusterv1.ScalingUpCondition, Status: metav1.ConditionTrue, Reason: clusterv1.ScalingUpReason, @@ -1607,7 +1607,7 @@ func TestSetScalingUpCondition(t *testing.T) { }), }}, machineDeployments: clusterv1.MachineDeploymentList{Items: []clusterv1.MachineDeployment{ - *fakeMachineDeployment("md1", v1beta2Condition{ + *fakeMachineDeployment("md1", condition{ Type: clusterv1.ClusterScalingUpCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentScalingUpReason, @@ -1615,13 +1615,13 @@ func TestSetScalingUpCondition(t *testing.T) { }), }}, machineSets: clusterv1.MachineSetList{Items: []clusterv1.MachineSet{ - *fakeMachineSet("ms1", OwnedByCluster("c"), v1beta2Condition{ + *fakeMachineSet("ms1", OwnedByCluster("c"), condition{ Type: clusterv1.ClusterScalingUpCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineSetScalingUpReason, Message: "Scaling up from 2 to 7 replicas", }), - *fakeMachineSet("ms2", 
v1beta2Condition{ + *fakeMachineSet("ms2", condition{ Type: clusterv1.ClusterScalingUpCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -1644,7 +1644,7 @@ func TestSetScalingUpCondition(t *testing.T) { cluster: fakeCluster("c", controlPlaneRef{}), controlPlane: fakeControlPlane("cp1"), machinePools: expv1.MachinePoolList{Items: []expv1.MachinePool{ - *fakeMachinePool("mp1", v1beta2Condition{ + *fakeMachinePool("mp1", condition{ Type: clusterv1.ScalingUpCondition, Status: metav1.ConditionTrue, Reason: clusterv1.ScalingUpReason, @@ -1652,7 +1652,7 @@ func TestSetScalingUpCondition(t *testing.T) { }), }}, machineDeployments: clusterv1.MachineDeploymentList{Items: []clusterv1.MachineDeployment{ - *fakeMachineDeployment("md1", v1beta2Condition{ + *fakeMachineDeployment("md1", condition{ Type: clusterv1.ClusterScalingUpCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentScalingUpReason, @@ -1660,13 +1660,13 @@ func TestSetScalingUpCondition(t *testing.T) { }), }}, machineSets: clusterv1.MachineSetList{Items: []clusterv1.MachineSet{ - *fakeMachineSet("ms1", OwnedByCluster("c"), v1beta2Condition{ + *fakeMachineSet("ms1", OwnedByCluster("c"), condition{ Type: clusterv1.ClusterScalingUpCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineSetScalingUpReason, Message: "Scaling up from 2 to 7 replicas", }), - *fakeMachineSet("ms2", v1beta2Condition{ + *fakeMachineSet("ms2", condition{ Type: clusterv1.ClusterScalingUpCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -1709,7 +1709,7 @@ func TestSetScalingUpCondition(t *testing.T) { name: "cluster without controlplane, descendants report scaling up", cluster: fakeCluster("c"), machinePools: expv1.MachinePoolList{Items: []expv1.MachinePool{ - *fakeMachinePool("mp1", v1beta2Condition{ + *fakeMachinePool("mp1", condition{ Type: clusterv1.ScalingUpCondition, Status: metav1.ConditionTrue, Reason: clusterv1.ScalingUpReason, @@ -1717,7 +1717,7 @@ func TestSetScalingUpCondition(t *testing.T) { }), }}, machineDeployments: clusterv1.MachineDeploymentList{Items: []clusterv1.MachineDeployment{ - *fakeMachineDeployment("md1", v1beta2Condition{ + *fakeMachineDeployment("md1", condition{ Type: clusterv1.ClusterScalingUpCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentScalingUpReason, @@ -1725,13 +1725,13 @@ func TestSetScalingUpCondition(t *testing.T) { }), }}, machineSets: clusterv1.MachineSetList{Items: []clusterv1.MachineSet{ - *fakeMachineSet("ms1", OwnedByCluster("c"), v1beta2Condition{ + *fakeMachineSet("ms1", OwnedByCluster("c"), condition{ Type: clusterv1.ClusterScalingUpCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineSetScalingUpReason, Message: "Scaling up from 2 to 7 replicas", }), - *fakeMachineSet("ms2", v1beta2Condition{ + *fakeMachineSet("ms2", condition{ Type: clusterv1.ClusterScalingUpCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -1823,7 +1823,7 @@ func TestSetScalingDownCondition(t *testing.T) { }}, machineSets: clusterv1.MachineSetList{Items: []clusterv1.MachineSet{ *fakeMachineSet("ms1", OwnedByCluster("c")), - *fakeMachineSet("ms2", v1beta2Condition{ + *fakeMachineSet("ms2", condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionFalse, Reason: "Foo", @@ -1845,12 +1845,12 @@ func TestSetScalingDownCondition(t *testing.T) { cluster: fakeCluster("c", controlPlaneRef{}), controlPlane: fakeControlPlane("cp1", condition{ Type: clusterv1.ClusterScalingDownCondition, - Status: corev1.ConditionTrue, + Status: 
metav1.ConditionTrue, Reason: "Foo", Message: "Scaling down from 0 to 3 replicas", }), machinePools: expv1.MachinePoolList{Items: []expv1.MachinePool{ - *fakeMachinePool("mp1", v1beta2Condition{ + *fakeMachinePool("mp1", condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -1858,7 +1858,7 @@ func TestSetScalingDownCondition(t *testing.T) { }), }}, machineDeployments: clusterv1.MachineDeploymentList{Items: []clusterv1.MachineDeployment{ - *fakeMachineDeployment("md1", v1beta2Condition{ + *fakeMachineDeployment("md1", condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -1866,13 +1866,13 @@ func TestSetScalingDownCondition(t *testing.T) { }), }}, machineSets: clusterv1.MachineSetList{Items: []clusterv1.MachineSet{ - *fakeMachineSet("ms1", OwnedByCluster("c"), v1beta2Condition{ + *fakeMachineSet("ms1", OwnedByCluster("c"), condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", Message: "Scaling down from 2 to 7 replicas", }), - *fakeMachineSet("ms2", v1beta2Condition{ + *fakeMachineSet("ms2", condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -1895,7 +1895,7 @@ func TestSetScalingDownCondition(t *testing.T) { cluster: fakeCluster("c", controlPlaneRef{}), controlPlane: fakeControlPlane("cp1"), machinePools: expv1.MachinePoolList{Items: []expv1.MachinePool{ - *fakeMachinePool("mp1", v1beta2Condition{ + *fakeMachinePool("mp1", condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -1903,7 +1903,7 @@ func TestSetScalingDownCondition(t *testing.T) { }), }}, machineDeployments: clusterv1.MachineDeploymentList{Items: []clusterv1.MachineDeployment{ - *fakeMachineDeployment("md1", v1beta2Condition{ + *fakeMachineDeployment("md1", condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -1911,13 +1911,13 @@ func TestSetScalingDownCondition(t *testing.T) { }), }}, machineSets: clusterv1.MachineSetList{Items: []clusterv1.MachineSet{ - *fakeMachineSet("ms1", OwnedByCluster("c"), v1beta2Condition{ + *fakeMachineSet("ms1", OwnedByCluster("c"), condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", Message: "Scaling down from 2 to 7 replicas", }), - *fakeMachineSet("ms2", v1beta2Condition{ + *fakeMachineSet("ms2", condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -1960,7 +1960,7 @@ func TestSetScalingDownCondition(t *testing.T) { name: "cluster without controlplane, descendants report scaling down", cluster: fakeCluster("c"), machinePools: expv1.MachinePoolList{Items: []expv1.MachinePool{ - *fakeMachinePool("mp1", v1beta2Condition{ + *fakeMachinePool("mp1", condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -1968,7 +1968,7 @@ func TestSetScalingDownCondition(t *testing.T) { }), }}, machineDeployments: clusterv1.MachineDeploymentList{Items: []clusterv1.MachineDeployment{ - *fakeMachineDeployment("md1", v1beta2Condition{ + *fakeMachineDeployment("md1", condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -1976,13 +1976,13 @@ func TestSetScalingDownCondition(t *testing.T) { }), }}, machineSets: clusterv1.MachineSetList{Items: []clusterv1.MachineSet{ - *fakeMachineSet("ms1", OwnedByCluster("c"), v1beta2Condition{ + 
*fakeMachineSet("ms1", OwnedByCluster("c"), condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", Message: "Scaling down from 2 to 7 replicas", }), - *fakeMachineSet("ms2", v1beta2Condition{ + *fakeMachineSet("ms2", condition{ Type: clusterv1.ClusterScalingDownCondition, Status: metav1.ConditionTrue, Reason: "Foo", @@ -2014,10 +2014,9 @@ func TestSetScalingDownCondition(t *testing.T) { } func TestSetRemediatingCondition(t *testing.T) { - healthCheckSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: corev1.ConditionTrue} - healthCheckNotSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: corev1.ConditionFalse} - ownerRemediated := clusterv1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, Status: corev1.ConditionFalse} - ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingReason, Message: "Machine is deleting"} + healthCheckSucceeded := metav1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: metav1.ConditionTrue} + healthCheckNotSucceeded := metav1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: metav1.ConditionFalse} + ownerRemediated := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingReason, Message: "Machine is deleting"} tests := []struct { name string @@ -2058,7 +2057,7 @@ func TestSetRemediatingCondition(t *testing.T) { machines: []*clusterv1.Machine{ fakeMachine("m1", condition(healthCheckSucceeded)), // Healthy machine fakeMachine("m2", condition(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation - fakeMachine("m3", condition(healthCheckNotSucceeded), condition(ownerRemediated), v1beta2Condition(ownerRemediatedV1Beta2)), + fakeMachine("m3", condition(healthCheckNotSucceeded), condition(ownerRemediated)), }, getDescendantsSucceeded: true, expectCondition: metav1.Condition{ @@ -2191,7 +2190,7 @@ func TestSetAvailableCondition(t *testing.T) { }, Status: clusterv1.ClusterStatus{ Conditions: []metav1.Condition{ - // No condition reported yet, the required ones should be reported as missing. + // No v1beta1Condition reported yet, the required ones should be reported as missing. }, }, }, @@ -2698,7 +2697,7 @@ func TestSetAvailableCondition(t *testing.T) { Type: clusterv1.ClusterTopologyReconciledCondition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterTopologyReconciledClusterClassNotReconciledReason, - Message: "ClusterClass not reconciled. If this condition persists please check ClusterClass status. A ClusterClass is reconciled if" + + Message: "ClusterClass not reconciled. If this v1beta1Condition persists please check ClusterClass status. A ClusterClass is reconciled if" + ".status.observedGeneration == .metadata.generation is true. If this is not the case either ClusterClass reconciliation failed or the ClusterClass is paused", }, }, @@ -2708,7 +2707,7 @@ func TestSetAvailableCondition(t *testing.T) { Type: clusterv1.ClusterAvailableCondition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterNotAvailableReason, - Message: "* TopologyReconciled: ClusterClass not reconciled. If this condition persists please check ClusterClass status. 
A ClusterClass is reconciled if" + + Message: "* TopologyReconciled: ClusterClass not reconciled. If this v1beta1Condition persists please check ClusterClass status. A ClusterClass is reconciled if" + ".status.observedGeneration == .metadata.generation is true. If this is not the case either ClusterClass reconciliation failed or the ClusterClass is paused", }, }, @@ -2754,7 +2753,7 @@ func TestSetAvailableCondition(t *testing.T) { Type: clusterv1.ClusterTopologyReconciledCondition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterTopologyReconciledClusterClassNotReconciledReason, - Message: "ClusterClass not reconciled. If this condition persists please check ClusterClass status. A ClusterClass is reconciled if" + + Message: "ClusterClass not reconciled. If this v1beta1Condition persists please check ClusterClass status. A ClusterClass is reconciled if" + ".status.observedGeneration == .metadata.generation is true. If this is not the case either ClusterClass reconciliation failed or the ClusterClass is paused", }, }, @@ -2765,7 +2764,7 @@ func TestSetAvailableCondition(t *testing.T) { Status: metav1.ConditionFalse, Reason: clusterv1.ClusterNotAvailableReason, Message: "* WorkersAvailable: 3 available replicas, at least 4 required (spec.strategy.rollout.maxUnavailable is 1, spec.replicas is 5) from MachineDeployment md1; 2 available replicas, at least 3 required (spec.strategy.rollout.maxUnavailable is 1, spec.replicas is 4) from MachinePool mp1\n" + - "* TopologyReconciled: ClusterClass not reconciled. If this condition persists please check ClusterClass status. A ClusterClass is reconciled if.status.observedGeneration == .metadata.generation is true. If this is not the case either ClusterClass reconciliation failed or the ClusterClass is paused", + "* TopologyReconciled: ClusterClass not reconciled. If this v1beta1Condition persists please check ClusterClass status. A ClusterClass is reconciled if.status.observedGeneration == .metadata.generation is true. 
If this is not the case either ClusterClass reconciliation failed or the ClusterClass is paused", }, }, } @@ -3035,38 +3034,26 @@ func (s deleted) ApplyToCluster(c *clusterv1.Cluster) { c.SetDeletionTimestamp(nil) } -type v1beta2Condition metav1.Condition +type condition metav1.Condition -func (c v1beta2Condition) ApplyToCluster(cluster *clusterv1.Cluster) { +func (c condition) ApplyToCluster(cluster *clusterv1.Cluster) { conditions.Set(cluster, metav1.Condition(c)) } -func (c v1beta2Condition) ApplyToMachinePool(mp *expv1.MachinePool) { +func (c condition) ApplyToMachinePool(mp *expv1.MachinePool) { conditions.Set(mp, metav1.Condition(c)) } -func (c v1beta2Condition) ApplyToMachineDeployment(md *clusterv1.MachineDeployment) { +func (c condition) ApplyToMachineDeployment(md *clusterv1.MachineDeployment) { conditions.Set(md, metav1.Condition(c)) } -func (c v1beta2Condition) ApplyToMachineSet(ms *clusterv1.MachineSet) { +func (c condition) ApplyToMachineSet(ms *clusterv1.MachineSet) { conditions.Set(ms, metav1.Condition(c)) } -func (c v1beta2Condition) ApplyToMachine(m *clusterv1.Machine) { - conditions.Set(m, metav1.Condition(c)) -} - -type condition clusterv1.Condition - func (c condition) ApplyToMachine(m *clusterv1.Machine) { - if m.Status.Deprecated == nil { - m.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{} - } - if m.Status.Deprecated.V1Beta1 == nil { - m.Status.Deprecated.V1Beta1 = &clusterv1.MachineV1Beta1DeprecatedStatus{} - } - m.Status.Deprecated.V1Beta1.Conditions = append(m.Status.Deprecated.V1Beta1.Conditions, clusterv1.Condition(c)) + conditions.Set(m, metav1.Condition(c)) } func (c condition) ApplyToControlPlane(cp *unstructured.Unstructured) { @@ -3084,11 +3071,10 @@ func (c condition) applyToUnstructured(i *unstructured.Unstructured) { } t, _ := c.LastTransitionTime.MarshalQueryParameter() conditions = append(conditions, map[string]interface{}{ - "type": string(c.Type), + "type": c.Type, "status": string(c.Status), "reason": c.Reason, "message": c.Message, - "severity": string(c.Severity), "lastTransitionTime": t, }) _ = unstructured.SetNestedSlice(i.Object, conditions, "status", "conditions") diff --git a/internal/controllers/cluster/cluster_controller_test.go b/internal/controllers/cluster/cluster_controller_test.go index 3cb412a7b01f..f4d9ba5c2022 100644 --- a/internal/controllers/cluster/cluster_controller_test.go +++ b/internal/controllers/cluster/cluster_controller_test.go @@ -84,7 +84,7 @@ func TestClusterReconciler(t *testing.T) { return len(instance.Finalizers) > 0 }, timeout).Should(BeTrue()) - // Validate the RemoteConnectionProbe condition is false (because kubeconfig Secret doesn't exist) + // Validate the RemoteConnectionProbe condition is false (because kubeconfig Secret doesn't exist) g.Eventually(func(g Gomega) { g.Expect(env.Get(ctx, key, instance)).To(Succeed()) diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index 0f374efaf03a..278bc63ed0d0 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -3570,7 +3570,7 @@ func TestNodeDeletionWithoutNodeRefFallback(t *testing.T) { // adds a condition list to an external object. 
func addConditionToExternal(u *unstructured.Unstructured, c metav1.Condition) { - unstructured.SetNestedSlice(u.Object, []interface{}{ + err := unstructured.SetNestedSlice(u.Object, []interface{}{ map[string]interface{}{ "type": c.Type, "status": string(c.Status), @@ -3578,6 +3578,9 @@ func addConditionToExternal(u *unstructured.Unstructured, c metav1.Condition) { "message": c.Message, }, }, "status", "conditions") + if err != nil { + panic(err) + } } // asserts the conditions set on the Getter object. diff --git a/internal/controllers/machinedeployment/machinedeployment_status_test.go b/internal/controllers/machinedeployment/machinedeployment_status_test.go index 337d19d1e1b7..f0cabbc3df6c 100644 --- a/internal/controllers/machinedeployment/machinedeployment_status_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_status_test.go @@ -271,8 +271,8 @@ func Test_setRollingOutCondition(t *testing.T) { name: "all machines are up to date", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("machine-1", withV1Beta2Condition(upToDateCondition)), - fakeMachine("machine-2", withV1Beta2Condition(upToDateCondition)), + fakeMachine("machine-1", withCondition(upToDateCondition)), + fakeMachine("machine-2", withCondition(upToDateCondition)), }, getMachinesSucceeded: true, expectCondition: metav1.Condition{ @@ -285,20 +285,20 @@ func Test_setRollingOutCondition(t *testing.T) { name: "one up-to-date, two not up-to-date, one reporting up-to-date unknown", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("machine-1", withV1Beta2Condition(upToDateCondition)), - fakeMachine("machine-2", withV1Beta2Condition(metav1.Condition{ + fakeMachine("machine-1", withCondition(upToDateCondition)), + fakeMachine("machine-2", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionUnknown, Reason: clusterv1.InternalErrorReason, })), - fakeMachine("machine-4", withV1Beta2Condition(metav1.Condition{ + fakeMachine("machine-4", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotUpToDateReason, Message: "* Failure domain failure-domain1, failure-domain2 required\n" + "* InfrastructureMachine is not up-to-date", })), - fakeMachine("machine-3", withV1Beta2Condition(metav1.Condition{ + fakeMachine("machine-3", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotUpToDateReason, @@ -803,8 +803,8 @@ func Test_setMachinesReadyCondition(t *testing.T) { name: "all machines are ready", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("machine-1", withV1Beta2Condition(readyCondition)), - fakeMachine("machine-2", withV1Beta2Condition(readyCondition)), + fakeMachine("machine-1", withCondition(readyCondition)), + fakeMachine("machine-2", withCondition(readyCondition)), }, getMachinesSucceeded: true, expectCondition: metav1.Condition{ @@ -817,7 +817,7 @@ func Test_setMachinesReadyCondition(t *testing.T) { name: "one ready, one has nothing reported", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("machine-1", withV1Beta2Condition(readyCondition)), + fakeMachine("machine-1", withCondition(readyCondition)), fakeMachine("machine-2"), }, getMachinesSucceeded: true, @@ -832,20 +832,20 @@ func Test_setMachinesReadyCondition(t *testing.T) { 
name: "one ready, one reporting not ready, one reporting unknown, one reporting deleting", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("machine-1", withV1Beta2Condition(readyCondition)), - fakeMachine("machine-2", withV1Beta2Condition(metav1.Condition{ + fakeMachine("machine-1", withCondition(readyCondition)), + fakeMachine("machine-2", withCondition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: "SomeReason", Message: "HealthCheckSucceeded: Some message", })), - fakeMachine("machine-3", withV1Beta2Condition(metav1.Condition{ + fakeMachine("machine-3", withCondition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionUnknown, Reason: "SomeUnknownReason", Message: "Some unknown message", })), - fakeMachine("machine-4", withV1Beta2Condition(metav1.Condition{ + fakeMachine("machine-4", withCondition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineDeletingReason, @@ -916,7 +916,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { name: "One machine up-to-date", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("up-to-date-1", withV1Beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-1", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "some-reason-1", @@ -934,7 +934,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { name: "One machine unknown", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("unknown-1", withV1Beta2Condition(metav1.Condition{ + fakeMachine("unknown-1", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionUnknown, Reason: "some-unknown-reason-1", @@ -953,7 +953,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { name: "One machine not up-to-date", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("not-up-to-date-machine-1", withV1Beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-1", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "some-not-up-to-date-reason", @@ -987,23 +987,23 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { name: "Two machines not up-to-date, two up-to-date, two not reported", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("up-to-date-1", withV1Beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-1", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "TestUpToDate", })), - fakeMachine("up-to-date-2", withV1Beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-2", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "TestUpToDate", })), - fakeMachine("not-up-to-date-machine-1", withV1Beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-1", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "TestNotUpToDate", Message: "This is not up-to-date message", })), - fakeMachine("not-up-to-date-machine-2", withV1Beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-2", withCondition(metav1.Condition{ Type: 
clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "TestNotUpToDate", @@ -1040,10 +1040,9 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { } func Test_setRemediatingCondition(t *testing.T) { - healthCheckSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: corev1.ConditionTrue} - healthCheckNotSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: corev1.ConditionFalse} - ownerRemediated := clusterv1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, Status: corev1.ConditionFalse} - ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingReason, Message: "Machine is deleting"} + healthCheckSucceeded := metav1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: metav1.ConditionTrue} + healthCheckNotSucceeded := metav1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: metav1.ConditionFalse} + ownerRemediated := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingReason, Message: "Machine is deleting"} tests := []struct { name string @@ -1082,9 +1081,9 @@ func Test_setRemediatingCondition(t *testing.T) { name: "With machines to be remediated by MD/MS", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("m1", withConditions(healthCheckSucceeded)), // Healthy machine - fakeMachine("m2", withConditions(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation - fakeMachine("m3", withConditions(healthCheckNotSucceeded, ownerRemediated), withV1Beta2Condition(ownerRemediatedV1Beta2)), + fakeMachine("m1", withCondition(healthCheckSucceeded)), // Healthy machine + fakeMachine("m2", withCondition(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation + fakeMachine("m3", withCondition(healthCheckNotSucceeded), withCondition(ownerRemediated)), }, getMachinesSucceeded: true, expectCondition: metav1.Condition{ @@ -1098,9 +1097,9 @@ func Test_setRemediatingCondition(t *testing.T) { name: "With one unhealthy machine not to be remediated by MD/MS", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("m1", withConditions(healthCheckSucceeded)), // Healthy machine - fakeMachine("m2", withConditions(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation - fakeMachine("m3", withConditions(healthCheckSucceeded)), // Healthy machine + fakeMachine("m1", withCondition(healthCheckSucceeded)), // Healthy machine + fakeMachine("m2", withCondition(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation + fakeMachine("m3", withCondition(healthCheckSucceeded)), // Healthy machine }, getMachinesSucceeded: true, expectCondition: metav1.Condition{ @@ -1114,9 +1113,9 @@ func Test_setRemediatingCondition(t *testing.T) { name: "With two unhealthy machine not to be remediated by MD/MS", machineDeployment: &clusterv1.MachineDeployment{}, machines: []*clusterv1.Machine{ - fakeMachine("m1", withConditions(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation - fakeMachine("m2", withConditions(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation - fakeMachine("m3", 
withConditions(healthCheckSucceeded)), // Healthy machine + fakeMachine("m1", withCondition(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation + fakeMachine("m2", withCondition(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation + fakeMachine("m3", withCondition(healthCheckSucceeded)), // Healthy machine }, getMachinesSucceeded: true, expectCondition: metav1.Condition{ @@ -1332,20 +1331,8 @@ func withStaleDeletion() fakeMachinesOption { } } -func withV1Beta2Condition(c metav1.Condition) fakeMachinesOption { +func withCondition(c metav1.Condition) fakeMachinesOption { return func(m *clusterv1.Machine) { conditions.Set(m, c) } } - -func withConditions(c ...clusterv1.Condition) fakeMachinesOption { - return func(m *clusterv1.Machine) { - if m.Status.Deprecated == nil { - m.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{} - } - if m.Status.Deprecated.V1Beta1 == nil { - m.Status.Deprecated.V1Beta1 = &clusterv1.MachineV1Beta1DeprecatedStatus{} - } - m.Status.Deprecated.V1Beta1.Conditions = append(m.Status.Deprecated.V1Beta1.Conditions, c...) - } -} diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go index c4750a244bf0..64d7432e3fc4 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go @@ -418,8 +418,7 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg errList := []error{} for _, t := range unhealthy { logger := logger.WithValues("Machine", klog.KObj(t.Machine), "Node", klog.KObj(t.Node)) - // TODO (v1beta2): test for v1beta2 conditions - condition := v1beta1conditions.Get(t.Machine, clusterv1.MachineHealthCheckSucceededV1Beta1Condition) + condition := conditions.Get(t.Machine, clusterv1.MachineHealthCheckSucceededCondition) if annotations.IsPaused(cluster, t.Machine) { logger.Info("Machine has failed health check, but machine is paused so skipping remediation", "reason", condition.Reason, "message", condition.Message) @@ -497,7 +496,6 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg logger.Info("Machine has failed health check, marking for remediation", "reason", condition.Reason, "message", condition.Message) // NOTE: MHC is responsible for creating MachineOwnerRemediatedCondition if missing or to trigger another remediation if the previous one is completed; // instead, if a remediation is in already progress, the remediation owner is responsible for completing the process and MHC should not overwrite the condition. 
- // TODO (v1beta2): test for v1beta2 conditions if !v1beta1conditions.Has(t.Machine, clusterv1.MachineOwnerRemediatedV1Beta1Condition) || v1beta1conditions.IsTrue(t.Machine, clusterv1.MachineOwnerRemediatedV1Beta1Condition) { v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineOwnerRemediatedV1Beta1Condition, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") } diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index 68ee40df7c04..0dc94a5c1e97 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -556,7 +556,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } for i := range machines.Items { - if !v1beta1conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededV1Beta1Condition) { + if !conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { continue } @@ -575,10 +575,10 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } for i := range machines.Items { - if !v1beta1conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededV1Beta1Condition) { + if !conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { continue } - if !v1beta1conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedV1Beta1Condition) { + if !conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedCondition) { continue } @@ -666,7 +666,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } for i := range machines.Items { - if !v1beta1conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededV1Beta1Condition) { + if !conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { continue } @@ -685,10 +685,10 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } for i := range machines.Items { - if !v1beta1conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededV1Beta1Condition) { + if !conditions.IsFalse(&machines.Items[i], clusterv1.MachineHealthCheckSucceededCondition) { continue } - if !v1beta1conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedV1Beta1Condition) { + if !conditions.Has(&machines.Items[i], clusterv1.MachineOwnerRemediatedCondition) { continue } @@ -1434,7 +1434,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { })).To(Succeed()) for i := range machines.Items { - if v1beta1conditions.Get(&machines.Items[i], clusterv1.MachineOwnerRemediatedV1Beta1Condition) != nil { + if conditions.Get(&machines.Items[i], clusterv1.MachineOwnerRemediatedCondition) != nil { unhealthyMachine = machines.Items[i].DeepCopy() } } diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go index 607a6596970c..51f1e563a042 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go @@ -263,8 +263,8 @@ func TestHealthCheckTargets(t *testing.T) { Machine: testMachineCreated1200s, Node: nil, } - nodeNotYetStartedTarget1200sCondition := newFailedHealthCheckCondition(clusterv1.NodeStartupTimeoutV1Beta1Reason, "Node failed to report startup in %s", timeoutForMachineToHaveNode) - 
nodeNotYetStartedTarget1200sV1Beta2Condition := newFailedHealthCheckV1Beta2Condition(clusterv1.MachineHealthCheckNodeStartupTimeoutReason, "Health check failed: Node failed to report startup in %s", timeoutForMachineToHaveNode) + nodeNotYetStartedTarget1200sCondition := newFailedHealthCheckV1Beta1Condition(clusterv1.NodeStartupTimeoutV1Beta1Reason, "Node failed to report startup in %s", timeoutForMachineToHaveNode) + nodeNotYetStartedTarget1200sV1Beta2Condition := newFailedHealthCheckCondition(clusterv1.MachineHealthCheckNodeStartupTimeoutReason, "Health check failed: Node failed to report startup in %s", timeoutForMachineToHaveNode) testMachineCreated400s := testMachine.DeepCopy() nowMinus400s := metav1.NewTime(time.Now().Add(-400 * time.Second)) @@ -285,8 +285,8 @@ func TestHealthCheckTargets(t *testing.T) { Node: &corev1.Node{}, nodeMissing: true, } - nodeGoneAwayCondition := newFailedHealthCheckCondition(clusterv1.NodeNotFoundV1Beta1Reason, "") - nodeGoneAwayV1Beta2Condition := newFailedHealthCheckV1Beta2Condition(clusterv1.MachineHealthCheckNodeDeletedReason, "Health check failed: Node %s has been deleted", testMachine.Status.NodeRef.Name) + nodeGoneAwayCondition := newFailedHealthCheckV1Beta1Condition(clusterv1.NodeNotFoundV1Beta1Reason, "") + nodeGoneAwayV1Beta2Condition := newFailedHealthCheckCondition(clusterv1.MachineHealthCheckNodeDeletedReason, "Health check failed: Node %s has been deleted", testMachine.Status.NodeRef.Name) // Create a test MHC without conditions testMHCEmptyConditions := &clusterv1.MachineHealthCheck{ @@ -365,8 +365,8 @@ func TestHealthCheckTargets(t *testing.T) { Node: testNodeUnknown400, nodeMissing: false, } - nodeUnknown400Condition := newFailedHealthCheckCondition(clusterv1.UnhealthyNodeConditionV1Beta1Reason, "Condition Ready on node is reporting status Unknown for more than %s", timeoutForUnhealthyConditions) - nodeUnknown400V1Beta2Condition := newFailedHealthCheckV1Beta2Condition(clusterv1.MachineHealthCheckUnhealthyNodeReason, "Health check failed: Condition Ready on Node is reporting status Unknown for more than %s", timeoutForUnhealthyConditions) + nodeUnknown400Condition := newFailedHealthCheckV1Beta1Condition(clusterv1.UnhealthyNodeConditionV1Beta1Reason, "Condition Ready on node is reporting status Unknown for more than %s", timeoutForUnhealthyConditions) + nodeUnknown400V1Beta2Condition := newFailedHealthCheckCondition(clusterv1.MachineHealthCheckUnhealthyNodeReason, "Health check failed: Condition Ready on Node is reporting status Unknown for more than %s", timeoutForUnhealthyConditions) // Target for when a node is healthy testNodeHealthy := newTestNode("node1") @@ -393,7 +393,7 @@ func TestHealthCheckTargets(t *testing.T) { Machine: testMachineFailureReason, Node: nil, } - machineFailureReasonCondition := newFailedHealthCheckCondition(clusterv1.MachineHasFailureV1Beta1Reason, "FailureReason: %s", failureReason) + machineFailureReasonCondition := newFailedHealthCheckV1Beta1Condition(clusterv1.MachineHasFailureV1Beta1Reason, "FailureReason: %s", failureReason) // Target for when the machine has a failure message failureMsg := "some failure message" @@ -409,7 +409,7 @@ func TestHealthCheckTargets(t *testing.T) { Machine: testMachineFailureMsg, Node: nil, } - machineFailureMsgCondition := newFailedHealthCheckCondition(clusterv1.MachineHasFailureV1Beta1Reason, "FailureMessage: %s", failureMsg) + machineFailureMsgCondition := newFailedHealthCheckV1Beta1Condition(clusterv1.MachineHasFailureV1Beta1Reason, "FailureMessage: %s", failureMsg) // Target 
for when the machine has the remediate machine annotation const annotationRemediationMsg = "Marked for remediation via remediate-machine annotation" @@ -422,8 +422,8 @@ func TestHealthCheckTargets(t *testing.T) { Machine: testMachineAnnotationRemediation, Node: nil, } - machineAnnotationRemediationCondition := newFailedHealthCheckCondition(clusterv1.HasRemediateMachineAnnotationV1Beta1Reason, annotationRemediationMsg) - machineAnnotationRemediationV1Beta2Condition := newFailedHealthCheckV1Beta2Condition(clusterv1.MachineHealthCheckHasRemediateAnnotationReason, annotationRemediationV1Beta2Msg) + machineAnnotationRemediationCondition := newFailedHealthCheckV1Beta1Condition(clusterv1.HasRemediateMachineAnnotationV1Beta1Reason, annotationRemediationMsg) + machineAnnotationRemediationV1Beta2Condition := newFailedHealthCheckCondition(clusterv1.MachineHealthCheckHasRemediateAnnotationReason, annotationRemediationV1Beta2Msg) testCases := []struct { desc string @@ -684,11 +684,11 @@ func newTestUnhealthyNode(name string, condition corev1.NodeConditionType, statu } } -func newFailedHealthCheckCondition(reason string, messageFormat string, messageArgs ...interface{}) clusterv1.Condition { +func newFailedHealthCheckV1Beta1Condition(reason string, messageFormat string, messageArgs ...interface{}) clusterv1.Condition { return *v1beta1conditions.FalseCondition(clusterv1.MachineHealthCheckSucceededV1Beta1Condition, reason, clusterv1.ConditionSeverityWarning, messageFormat, messageArgs...) } -func newFailedHealthCheckV1Beta2Condition(reason string, messageFormat string, messageArgs ...interface{}) metav1.Condition { +func newFailedHealthCheckCondition(reason string, messageFormat string, messageArgs ...interface{}) metav1.Condition { return metav1.Condition{ Type: clusterv1.MachineHealthCheckSucceededCondition, Status: metav1.ConditionFalse, diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index 388f68f10735..836e48658578 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -1417,10 +1417,7 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) ( // reports that remediation has been completed and the Machine has been deleted. for _, m := range machines { if !m.DeletionTimestamp.IsZero() { - // TODO (v1beta2): test for v1beta2 conditions - // TODO: Check for Status: False and Reason: MachineSetMachineRemediationMachineDeletingV1Beta2Reason - // instead when starting to use v1beta2 conditions for control flow. - if v1beta1conditions.IsTrue(m, clusterv1.MachineOwnerRemediatedV1Beta1Condition) { + if c := conditions.Get(m, clusterv1.MachineOwnerRemediatedCondition); c != nil && c.Status == metav1.ConditionFalse && c.Reason == clusterv1.MachineSetMachineRemediationMachineDeletingReason { // Remediation for this Machine has been triggered by this controller but it is still in flight, // i.e. it still goes through the deletion workflow and exists in etcd. 
maxInFlight-- @@ -1536,7 +1533,7 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) ( return ctrl.Result{}, nil } -func patchMachineConditions(ctx context.Context, c client.Client, machines []*clusterv1.Machine, v1beta2Condition metav1.Condition, condition *clusterv1.Condition) error { +func patchMachineConditions(ctx context.Context, c client.Client, machines []*clusterv1.Machine, condition metav1.Condition, v1beta1condition *clusterv1.Condition) error { var errs []error for _, m := range machines { patchHelper, err := patch.NewHelper(m, c) @@ -1545,10 +1542,10 @@ func patchMachineConditions(ctx context.Context, c client.Client, machines []*cl continue } - if condition != nil { - v1beta1conditions.Set(m, condition) + if v1beta1condition != nil { + v1beta1conditions.Set(m, v1beta1condition) } - conditions.Set(m, v1beta2Condition) + conditions.Set(m, condition) if err := patchHelper.Patch(ctx, m, patch.WithOwnedV1beta1Conditions{Conditions: []clusterv1.ConditionType{ diff --git a/internal/controllers/machineset/machineset_controller_status_test.go b/internal/controllers/machineset/machineset_controller_status_test.go index 8f0886bc17a2..82703ce63c50 100644 --- a/internal/controllers/machineset/machineset_controller_status_test.go +++ b/internal/controllers/machineset/machineset_controller_status_test.go @@ -572,7 +572,7 @@ func Test_setMachinesReadyCondition(t *testing.T) { name: "one machine is ready", machineSet: machineSet, machines: []*clusterv1.Machine{ - fakeMachine("machine-1", withV1Beta2Condition(readyCondition)), + fakeMachine("machine-1", withCondition(readyCondition)), }, getAndAdoptMachinesForMachineSetSucceeded: true, expectCondition: metav1.Condition{ @@ -585,8 +585,8 @@ func Test_setMachinesReadyCondition(t *testing.T) { name: "all machines are ready", machineSet: machineSet, machines: []*clusterv1.Machine{ - fakeMachine("machine-1", withV1Beta2Condition(readyCondition)), - fakeMachine("machine-2", withV1Beta2Condition(readyCondition)), + fakeMachine("machine-1", withCondition(readyCondition)), + fakeMachine("machine-2", withCondition(readyCondition)), }, getAndAdoptMachinesForMachineSetSucceeded: true, expectCondition: metav1.Condition{ @@ -599,7 +599,7 @@ func Test_setMachinesReadyCondition(t *testing.T) { name: "one ready, one has nothing reported", machineSet: machineSet, machines: []*clusterv1.Machine{ - fakeMachine("machine-1", withV1Beta2Condition(readyCondition)), + fakeMachine("machine-1", withCondition(readyCondition)), fakeMachine("machine-2"), }, getAndAdoptMachinesForMachineSetSucceeded: true, @@ -614,20 +614,20 @@ func Test_setMachinesReadyCondition(t *testing.T) { name: "one ready, one reporting not ready, one reporting unknown, one reporting deleting", machineSet: machineSet, machines: []*clusterv1.Machine{ - fakeMachine("machine-1", withV1Beta2Condition(readyCondition)), - fakeMachine("machine-2", withV1Beta2Condition(metav1.Condition{ + fakeMachine("machine-1", withCondition(readyCondition)), + fakeMachine("machine-2", withCondition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: "SomeReason", Message: "HealthCheckSucceeded: Some message", })), - fakeMachine("machine-3", withV1Beta2Condition(metav1.Condition{ + fakeMachine("machine-3", withCondition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionUnknown, Reason: "SomeUnknownReason", Message: "Some unknown message", })), - fakeMachine("machine-4", withV1Beta2Condition(metav1.Condition{ + 
fakeMachine("machine-4", withCondition(metav1.Condition{ Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineDeletingReason, @@ -696,7 +696,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { name: "One machine up-to-date", machineSet: machineSet, machines: []*clusterv1.Machine{ - fakeMachine("up-to-date-1", withV1Beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-1", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "some-reason-1", @@ -714,7 +714,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { name: "One machine unknown", machineSet: machineSet, machines: []*clusterv1.Machine{ - fakeMachine("unknown-1", withV1Beta2Condition(metav1.Condition{ + fakeMachine("unknown-1", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionUnknown, Reason: "some-unknown-reason-1", @@ -733,7 +733,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { name: "One machine not up-to-date", machineSet: machineSet, machines: []*clusterv1.Machine{ - fakeMachine("not-up-to-date-machine-1", withV1Beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-1", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "some-not-up-to-date-reason", @@ -767,23 +767,23 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { name: "Two machines not up-to-date, two up-to-date, two not reported", machineSet: machineSet, machines: []*clusterv1.Machine{ - fakeMachine("up-to-date-1", withV1Beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-1", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "TestUpToDate", })), - fakeMachine("up-to-date-2", withV1Beta2Condition(metav1.Condition{ + fakeMachine("up-to-date-2", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue, Reason: "TestUpToDate", })), - fakeMachine("not-up-to-date-machine-1", withV1Beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-1", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "TestNotUpToDate", Message: "This is not up-to-date message", })), - fakeMachine("not-up-to-date-machine-2", withV1Beta2Condition(metav1.Condition{ + fakeMachine("not-up-to-date-machine-2", withCondition(metav1.Condition{ Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionFalse, Reason: "TestNotUpToDate", @@ -816,11 +816,10 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { } func Test_setRemediatingCondition(t *testing.T) { - healthCheckSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: corev1.ConditionTrue} - healthCheckNotSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: corev1.ConditionFalse} - ownerRemediated := clusterv1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, Status: corev1.ConditionFalse} - ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingReason, Message: "Machine is deleting"} - ownerRemediatedWaitingForRemediationV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse, Reason: 
clusterv1.MachineOwnerRemediatedWaitingForRemediationReason, Message: "KubeadmControlPlane ns1/cp1 is upgrading (\"ControlPlaneIsStable\" preflight check failed)"} + healthCheckSucceeded := metav1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: metav1.ConditionTrue} + healthCheckNotSucceeded := metav1.Condition{Type: clusterv1.MachineHealthCheckSucceededCondition, Status: metav1.ConditionFalse} + ownerRemediated := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingReason, Message: "Machine is deleting"} + ownerRemediatedWaitingForRemediation := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineOwnerRemediatedWaitingForRemediationReason, Message: "KubeadmControlPlane ns1/cp1 is upgrading (\"ControlPlaneIsStable\" preflight check failed)"} tests := []struct { name string @@ -859,9 +858,9 @@ func Test_setRemediatingCondition(t *testing.T) { name: "With machines to be remediated by MS", machineSet: &clusterv1.MachineSet{}, machines: []*clusterv1.Machine{ - fakeMachine("m1", withConditions(healthCheckSucceeded)), // Healthy machine - fakeMachine("m2", withConditions(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation - fakeMachine("m3", withConditions(healthCheckNotSucceeded, ownerRemediated), withV1Beta2Condition(ownerRemediatedV1Beta2)), + fakeMachine("m1", withCondition(healthCheckSucceeded)), // Healthy machine + fakeMachine("m2", withCondition(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation + fakeMachine("m3", withCondition(healthCheckNotSucceeded), withCondition(ownerRemediated)), }, getAndAdoptMachinesForMachineSetSucceeded: true, expectCondition: metav1.Condition{ @@ -875,10 +874,10 @@ func Test_setRemediatingCondition(t *testing.T) { name: "With machines to be remediated by MS and preflight check error", machineSet: &clusterv1.MachineSet{}, machines: []*clusterv1.Machine{ - fakeMachine("m1", withConditions(healthCheckSucceeded)), // Healthy machine - fakeMachine("m2", withConditions(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation - fakeMachine("m3", withConditions(healthCheckNotSucceeded, ownerRemediated), withV1Beta2Condition(ownerRemediatedV1Beta2)), - fakeMachine("m4", withConditions(healthCheckNotSucceeded, ownerRemediated), withV1Beta2Condition(ownerRemediatedWaitingForRemediationV1Beta2)), + fakeMachine("m1", withCondition(healthCheckSucceeded)), // Healthy machine + fakeMachine("m2", withCondition(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation + fakeMachine("m3", withCondition(healthCheckNotSucceeded), withCondition(ownerRemediated)), + fakeMachine("m4", withCondition(healthCheckNotSucceeded), withCondition(ownerRemediatedWaitingForRemediation)), }, getAndAdoptMachinesForMachineSetSucceeded: true, // This preflight check error can happen when a Machine becomes unhealthy while the control plane is upgrading. 
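
Note on the Test_setRemediatingCondition fixtures above: with the deprecated withConditions helper removed, healthCheckSucceeded, healthCheckNotSucceeded and ownerRemediated are all plain metav1.Condition values and every fixture goes through the single withCondition option. An unhealthy machine that is already being remediated is therefore built as, for example, fakeMachine("m3", withCondition(healthCheckNotSucceeded), withCondition(ownerRemediated)) instead of mixing clusterv1.Condition and metav1.Condition helpers, so the expected Remediating condition is derived from one condition source only.
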
@@ -894,9 +893,9 @@ func Test_setRemediatingCondition(t *testing.T) { name: "With one unhealthy machine not to be remediated by MS", machineSet: &clusterv1.MachineSet{}, machines: []*clusterv1.Machine{ - fakeMachine("m1", withConditions(healthCheckSucceeded)), // Healthy machine - fakeMachine("m2", withConditions(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation - fakeMachine("m3", withConditions(healthCheckSucceeded)), // Healthy machine + fakeMachine("m1", withCondition(healthCheckSucceeded)), // Healthy machine + fakeMachine("m2", withCondition(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation + fakeMachine("m3", withCondition(healthCheckSucceeded)), // Healthy machine }, getAndAdoptMachinesForMachineSetSucceeded: true, expectCondition: metav1.Condition{ @@ -910,9 +909,9 @@ func Test_setRemediatingCondition(t *testing.T) { name: "With two unhealthy machine not to be remediated by MS", machineSet: &clusterv1.MachineSet{}, machines: []*clusterv1.Machine{ - fakeMachine("m1", withConditions(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation - fakeMachine("m2", withConditions(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation - fakeMachine("m3", withConditions(healthCheckSucceeded)), // Healthy machine + fakeMachine("m1", withCondition(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation + fakeMachine("m2", withCondition(healthCheckNotSucceeded)), // Unhealthy machine, not yet marked for remediation + fakeMachine("m3", withCondition(healthCheckSucceeded)), // Healthy machine }, getAndAdoptMachinesForMachineSetSucceeded: true, expectCondition: metav1.Condition{ @@ -1067,7 +1066,7 @@ func Test_aggregateStaleMachines(t *testing.T) { { name: "Does not report details about stale machines draining since less than 5 minutes", machines: []*clusterv1.Machine{ - fakeMachine("m1", withStaleDeletionTimestamp(), withV1Beta2Condition(metav1.Condition{ + fakeMachine("m1", withStaleDeletionTimestamp(), withCondition(metav1.Condition{ Type: clusterv1.MachineDeletingCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeletingDrainingNodeReason, @@ -1077,7 +1076,7 @@ func Test_aggregateStaleMachines(t *testing.T) { * Pod pod-6-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 1 After above Pods have been removed from the Node, the following Pods will be evicted: pod-7-eviction-later, pod-8-eviction-later`, })), - fakeMachine("m2", withStaleDeletionTimestamp(), withV1Beta2Condition(metav1.Condition{ + fakeMachine("m2", withStaleDeletionTimestamp(), withCondition(metav1.Condition{ Type: clusterv1.MachineDeletingCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeletingDrainingNodeReason, @@ -1092,7 +1091,7 @@ After above Pods have been removed from the Node, the following Pods will be evi { name: "Report details about stale machines draining since more than 5 minutes", machines: []*clusterv1.Machine{ - fakeMachine("m1", withStaleDeletionTimestamp(), withV1Beta2Condition(metav1.Condition{ + fakeMachine("m1", withStaleDeletionTimestamp(), withCondition(metav1.Condition{ Type: clusterv1.MachineDeletingCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeletingDrainingNodeReason, @@ -1102,7 +1101,7 @@ After above Pods have been removed from the Node, the following Pods will be evi * Pod pod-6-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 1 After above Pods have been removed 
from the Node, the following Pods will be evicted: pod-7-eviction-later, pod-8-eviction-later`, }), withStaleDrain()), - fakeMachine("m2", withStaleDeletionTimestamp(), withV1Beta2Condition(metav1.Condition{ + fakeMachine("m2", withStaleDeletionTimestamp(), withCondition(metav1.Condition{ Type: clusterv1.MachineDeletingCondition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeletingDrainingNodeReason, @@ -1166,20 +1165,8 @@ func withStaleDrain() fakeMachinesOption { } } -func withV1Beta2Condition(c metav1.Condition) fakeMachinesOption { +func withCondition(c metav1.Condition) fakeMachinesOption { return func(m *clusterv1.Machine) { conditions.Set(m, c) } } - -func withConditions(c ...clusterv1.Condition) fakeMachinesOption { - return func(m *clusterv1.Machine) { - if m.Status.Deprecated == nil { - m.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{} - } - if m.Status.Deprecated.V1Beta1 == nil { - m.Status.Deprecated.V1Beta1 = &clusterv1.MachineV1Beta1DeprecatedStatus{} - } - m.Status.Deprecated.V1Beta1.Conditions = append(m.Status.Deprecated.V1Beta1.Conditions, c...) - } -} diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index 87aad0cfbaa2..24adf69e110a 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -1517,20 +1517,6 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { Finalizers: []string{"block-deletion"}, }, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - { - Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, - }, - }, Conditions: []metav1.Condition{ { Type: clusterv1.MachineOwnerRemediatedCondition, @@ -1553,21 +1539,6 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { Namespace: "default", }, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - { - // This condition should be cleaned up because HealthCheckSucceeded is true. - Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionTrue, - }, - }, - }, - }, Conditions: []metav1.Condition{ { // This condition should be cleaned up because HealthCheckSucceeded is true. 
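
The hunks above and below drop the Deprecated.V1Beta1 condition blocks from the reconcileUnhealthyMachines fixtures, so a machine marked for remediation is now described only through status.conditions. A minimal sketch of such a fixture after this change (constants and field names are taken from the surrounding hunks, the clusterv1 and metav1 imports already used in this test file are assumed, and ObjectMeta plus the remaining status fields are omitted) could look like:

    unhealthyMachine := &clusterv1.Machine{
        Status: clusterv1.MachineStatus{
            Conditions: []metav1.Condition{
                {
                    // Remediation was already triggered by the MachineSet controller for this Machine.
                    Type:   clusterv1.MachineOwnerRemediatedCondition,
                    Status: metav1.ConditionFalse,
                    Reason: clusterv1.MachineSetMachineRemediationMachineDeletingReason,
                },
                {
                    // The MachineHealthCheck reported this Machine as unhealthy.
                    Type:   clusterv1.MachineHealthCheckSucceededCondition,
                    Status: metav1.ConditionFalse,
                },
            },
        },
    }

This mirrors the controller change earlier in this patch, where remediation-in-flight is detected by conditions.Get(m, clusterv1.MachineOwnerRemediatedCondition) returning False with reason MachineSetMachineRemediationMachineDeletingReason, rather than by the old v1beta1 IsTrue check.
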
@@ -1651,20 +1622,6 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { Namespace: "default", }, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - { - Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, - }, - }, Conditions: []metav1.Condition{ { Type: clusterv1.MachineOwnerRemediatedCondition, @@ -1687,21 +1644,6 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { Namespace: "default", }, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - { - // This condition should be cleaned up because HealthCheckSucceeded is true. - Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionTrue, - }, - }, - }, - }, Conditions: []metav1.Condition{ { // This condition should be cleaned up because HealthCheckSucceeded is true. @@ -1823,20 +1765,6 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { Finalizers: []string{"block-deletion"}, }, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - { - Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, - }, - }, Conditions: []metav1.Condition{ { Type: clusterv1.MachineOwnerRemediatedCondition, @@ -1859,21 +1787,6 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { Namespace: "default", }, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - { - // This condition should be cleaned up because HealthCheckSucceeded is true. - Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionTrue, - }, - }, - }, - }, Conditions: []metav1.Condition{ { // This condition should be cleaned up because HealthCheckSucceeded is true. @@ -1921,11 +1834,6 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { // Verify that no action was taken on the Machine: MachineOwnerRemediated should be false // and the Machine wasn't deleted. g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(unhealthyMachine), m)).To(Succeed()) - g.Expect(v1beta1conditions.Has(m, condition)). - To(BeTrue(), "Machine should have the %s condition set", condition) - machineOwnerRemediatedCondition := v1beta1conditions.Get(m, condition) - g.Expect(machineOwnerRemediatedCondition.Status). 
- To(Equal(corev1.ConditionFalse), "%s condition status should be false", condition) g.Expect(unhealthyMachine.DeletionTimestamp).Should(BeZero()) c := conditions.Get(m, clusterv1.MachineOwnerRemediatedCondition) g.Expect(c).ToNot(BeNil()) @@ -2040,20 +1948,6 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { CreationTimestamp: metav1.Time{Time: metav1.Now().Add(time.Duration(i) * time.Second)}, }, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - { - Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, - }, - }, Conditions: []metav1.Condition{ { Type: clusterv1.MachineOwnerRemediatedCondition, @@ -2078,21 +1972,6 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { Namespace: "default", }, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - { - // This condition should be cleaned up because HealthCheckSucceeded is true. - Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionTrue, - }, - }, - }, - }, Conditions: []metav1.Condition{ { // This condition should be cleaned up because HealthCheckSucceeded is true. @@ -2143,11 +2022,6 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { if i < total-maxInFlight { // Machines before the maxInFlight should not be deleted. g.Expect(err).ToNot(HaveOccurred()) - g.Expect(v1beta1conditions.Has(m, condition)). - To(BeTrue(), "Machine should have the %s condition set", condition) - machineOwnerRemediatedCondition := v1beta1conditions.Get(m, condition) - g.Expect(machineOwnerRemediatedCondition.Status). - To(Equal(corev1.ConditionFalse), "%s condition status should be false", condition) c := conditions.Get(m, clusterv1.MachineOwnerRemediatedCondition) g.Expect(c).ToNot(BeNil()) g.Expect(*c).To(conditions.MatchCondition(metav1.Condition{ @@ -2210,11 +2084,6 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) { if i < total-(maxInFlight*2) { // Machines before the maxInFlight*2 should not be deleted, and should have the remediated condition to false. g.Expect(err).ToNot(HaveOccurred()) - g.Expect(v1beta1conditions.Has(m, condition)). - To(BeTrue(), "Machine should have the %s condition set", condition) - machineOwnerRemediatedCondition := v1beta1conditions.Get(m, condition) - g.Expect(machineOwnerRemediatedCondition.Status). 
- To(Equal(corev1.ConditionFalse), "%s condition status should be false", condition) c := conditions.Get(m, clusterv1.MachineOwnerRemediatedCondition) g.Expect(c).ToNot(BeNil()) g.Expect(*c).To(conditions.MatchCondition(metav1.Condition{ @@ -3174,20 +3043,6 @@ func TestSortMachinesToRemediate(t *testing.T) { }, }, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - { - Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, - }, - }, Conditions: []metav1.Condition{ { Type: clusterv1.MachineOwnerRemediatedCondition, @@ -3215,20 +3070,6 @@ func TestSortMachinesToRemediate(t *testing.T) { CreationTimestamp: metav1.Time{Time: metav1.Now().Add(time.Duration(i) * time.Second)}, }, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - { - Type: clusterv1.MachineOwnerRemediatedV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, - }, - }, Conditions: []metav1.Condition{ { Type: clusterv1.MachineOwnerRemediatedCondition, diff --git a/internal/controllers/machineset/machineset_delete_policy.go b/internal/controllers/machineset/machineset_delete_policy.go index 8a23f70989ea..adcb92c1d4f7 100644 --- a/internal/controllers/machineset/machineset_delete_policy.go +++ b/internal/controllers/machineset/machineset_delete_policy.go @@ -21,12 +21,10 @@ import ( "sort" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/util/conditions" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) type ( @@ -151,8 +149,8 @@ func isMachineHealthy(machine *clusterv1.Machine) bool { if conditions.IsFalse(machine, clusterv1.MachineNodeReadyCondition) { return false } - healthCheckCondition := v1beta1conditions.Get(machine, clusterv1.MachineHealthCheckSucceededV1Beta1Condition) - if healthCheckCondition != nil && healthCheckCondition.Status == corev1.ConditionFalse { + healthCheckCondition := conditions.Get(machine, clusterv1.MachineHealthCheckSucceededCondition) + if healthCheckCondition != nil && healthCheckCondition.Status == metav1.ConditionFalse { return false } return true diff --git a/internal/controllers/machineset/machineset_delete_policy_test.go b/internal/controllers/machineset/machineset_delete_policy_test.go index fc7e08eaa447..7ce766b5a967 100644 --- a/internal/controllers/machineset/machineset_delete_policy_test.go +++ b/internal/controllers/machineset/machineset_delete_policy_test.go @@ -78,14 +78,10 @@ func TestMachineToDelete(t *testing.T) { healthCheckSucceededConditionFalseMachine := &clusterv1.Machine{ Status: clusterv1.MachineStatus{ NodeRef: nodeRef, - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionFalse, - }, - }, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineHealthCheckSucceededCondition, + Status: metav1.ConditionFalse, }, }, }, @@ -93,14 +89,10 @@ func 
TestMachineToDelete(t *testing.T) { healthCheckSucceededConditionUnknownMachine := &clusterv1.Machine{ Status: clusterv1.MachineStatus{ NodeRef: nodeRef, - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.MachineHealthCheckSucceededV1Beta1Condition, - Status: corev1.ConditionUnknown, - }, - }, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineHealthCheckSucceededCondition, + Status: metav1.ConditionUnknown, }, }, }, diff --git a/util/collections/machine_filters.go b/util/collections/machine_filters.go index ad1a7f7d3709..19267bad59cd 100644 --- a/util/collections/machine_filters.go +++ b/util/collections/machine_filters.go @@ -156,8 +156,7 @@ func IsUnhealthyAndOwnerRemediated(machine *clusterv1.Machine) bool { if machine == nil { return false } - // TODO (v1beta2): test for v1beta2 conditions - return v1beta1conditions.IsFalse(machine, clusterv1.MachineHealthCheckSucceededV1Beta1Condition) && v1beta1conditions.IsFalse(machine, clusterv1.MachineOwnerRemediatedV1Beta1Condition) + return conditions.IsFalse(machine, clusterv1.MachineHealthCheckSucceededCondition) && conditions.IsFalse(machine, clusterv1.MachineOwnerRemediatedCondition) } // IsUnhealthy returns a filter to find all machines that have a MachineHealthCheckSucceeded condition set to False, @@ -166,8 +165,7 @@ func IsUnhealthy(machine *clusterv1.Machine) bool { if machine == nil { return false } - // TODO (v1beta2): test for v1beta2 conditions - return v1beta1conditions.IsFalse(machine, clusterv1.MachineHealthCheckSucceededV1Beta1Condition) + return conditions.IsFalse(machine, clusterv1.MachineHealthCheckSucceededCondition) } // HasUnhealthyControlPlaneComponents returns a filter to find all unhealthy control plane machines that diff --git a/util/collections/machine_filters_test.go b/util/collections/machine_filters_test.go index 0caea18dd71b..d346813980d5 100644 --- a/util/collections/machine_filters_test.go +++ b/util/collections/machine_filters_test.go @@ -29,6 +29,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" "sigs.k8s.io/cluster-api/util/collections" + "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) @@ -89,22 +90,35 @@ func TestUnhealthyFilters(t *testing.T) { t.Run("healthy machine (with HealthCheckSucceeded condition == True) should return false", func(t *testing.T) { g := NewWithT(t) m := &clusterv1.Machine{} - v1beta1conditions.MarkTrue(m, clusterv1.MachineHealthCheckSucceededV1Beta1Condition) + conditions.Set(m, metav1.Condition{ + Type: clusterv1.MachineHealthCheckSucceededCondition, + Status: metav1.ConditionTrue, + }) g.Expect(collections.IsUnhealthy(m)).To(BeFalse()) g.Expect(collections.IsUnhealthyAndOwnerRemediated(m)).To(BeFalse()) }) t.Run("unhealthy machine NOT eligible for KCP remediation (with withHealthCheckSucceeded condition == False but without OwnerRemediated) should return false", func(t *testing.T) { g := NewWithT(t) m := &clusterv1.Machine{} - v1beta1conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededV1Beta1Condition, clusterv1.MachineHasFailureV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") + conditions.Set(m, metav1.Condition{ + Type: clusterv1.MachineHealthCheckSucceededCondition, + Status: metav1.ConditionFalse, + }) g.Expect(collections.IsUnhealthy(m)).To(BeTrue()) 
g.Expect(collections.IsUnhealthyAndOwnerRemediated(m)).To(BeFalse()) }) t.Run("unhealthy machine eligible for KCP (with HealthCheckSucceeded condition == False and with OwnerRemediated) should return true", func(t *testing.T) { g := NewWithT(t) m := &clusterv1.Machine{} - v1beta1conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededV1Beta1Condition, clusterv1.MachineHasFailureV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") - v1beta1conditions.MarkFalse(m, clusterv1.MachineOwnerRemediatedV1Beta1Condition, clusterv1.WaitingForRemediationV1Beta1Reason, clusterv1.ConditionSeverityWarning, "") + + conditions.Set(m, metav1.Condition{ + Type: clusterv1.MachineHealthCheckSucceededCondition, + Status: metav1.ConditionFalse, + }) + conditions.Set(m, metav1.Condition{ + Type: clusterv1.MachineOwnerRemediatedCondition, + Status: metav1.ConditionFalse, + }) g.Expect(collections.IsUnhealthy(m)).To(BeTrue()) g.Expect(collections.IsUnhealthyAndOwnerRemediated(m)).To(BeTrue()) }) From 63fec21d12026b1f786fc4099542cdddcd3502da Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Tue, 22 Apr 2025 16:24:21 +0200 Subject: [PATCH 13/20] Stop using clusterv1.bootstrapv1.DataSecretAvailableV1Beta1Condition in controllers --- .../controllers/kubeadmconfig_controller.go | 3 +- .../kubeadmconfig_controller_test.go | 39 ++++++++----------- 2 files changed, 18 insertions(+), 24 deletions(-) diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index 44082fa02ea0..7a3f0cd09ca3 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -482,8 +482,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex // initialize the DataSecretAvailableCondition if missing. 
// this is required in order to avoid the condition's LastTransitionTime to flicker in case of errors surfacing // using the DataSecretGeneratedFailedReason - // TODO (v1beta2): test for v1beta2 conditions - if v1beta1conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableV1Beta1Condition) != bootstrapv1.DataSecretGenerationFailedV1Beta1Reason { + if !conditions.Has(scope.Config, bootstrapv1.KubeadmConfigDataSecretAvailableCondition) { v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableV1Beta1Condition, clusterv1.WaitingForControlPlaneAvailableV1Beta1Reason, clusterv1.ConditionSeverityInfo, "") conditions.Set(scope.Config, metav1.Condition{ Type: bootstrapv1.KubeadmConfigDataSecretAvailableCondition, diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go index 7b1e270fbe0c..d861ad2e0dda 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go @@ -46,7 +46,6 @@ import ( "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" "sigs.k8s.io/cluster-api/util/conditions" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/secret" "sigs.k8s.io/cluster-api/util/test/builder" @@ -301,7 +300,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasDataSecretName g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) - assertHasTrueCondition(g, myclient, request, bootstrapv1.DataSecretAvailableV1Beta1Condition) + assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition) } func TestKubeadmConfigReconciler_ReturnEarlyIfClusterInfraNotReady(t *testing.T) { @@ -342,7 +341,7 @@ func TestKubeadmConfigReconciler_ReturnEarlyIfClusterInfraNotReady(t *testing.T) actualResult, actualError := k.Reconcile(ctx, request) g.Expect(actualResult).To(BeComparableTo(expectedResult)) g.Expect(actualError).ToNot(HaveOccurred()) - assertHasFalseCondition(g, myclient, request, bootstrapv1.DataSecretAvailableV1Beta1Condition, clusterv1.ConditionSeverityInfo, bootstrapv1.WaitingForClusterInfrastructureV1Beta1Reason) + assertHasFalseCondition(g, myclient, request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition, bootstrapv1.KubeadmConfigDataSecretNotAvailableReason) } // Return early If the owning machine does not have an associated cluster. 
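
Worth noting for the kubeadmconfig_controller.go hunk above: the guard around initializing the data secret condition changes semantics slightly. Before, the controller re-marked the condition whenever its v1beta1 reason was anything other than DataSecretGenerationFailedV1Beta1Reason; now it only initializes KubeadmConfigDataSecretAvailableCondition when the condition is missing (!conditions.Has(...)), which still serves the stated goal of keeping LastTransitionTime from flickering but no longer refreshes an already-set condition. The test assertions below follow the same move: assertHasFalseCondition and assertHasTrueCondition now take the condition type and reason as plain strings, read the condition via conditions.Get, and no longer assert a severity, since metav1.Condition has no severity field.
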
@@ -474,7 +473,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueJoiningNodesIfControlPlaneNotI g.Expect(err).ToNot(HaveOccurred()) g.Expect(result.Requeue).To(BeFalse()) g.Expect(result.RequeueAfter).To(Equal(30 * time.Second)) - assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.DataSecretAvailableV1Beta1Condition, clusterv1.ConditionSeverityInfo, clusterv1.WaitingForControlPlaneAvailableV1Beta1Reason) + assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition, bootstrapv1.KubeadmConfigDataSecretNotAvailableReason) }) } } @@ -534,8 +533,8 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) g.Expect(cfg.Status.Initialization.DataSecretCreated).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) - assertHasTrueCondition(g, myclient, request, bootstrapv1.CertificatesAvailableV1Beta1Condition) - assertHasTrueCondition(g, myclient, request, bootstrapv1.DataSecretAvailableV1Beta1Condition) + assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigCertificatesAvailableCondition) + assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition) // Expect the Secret to exist, and for it to contain some data under the "value" key. g.Expect(myclient.Get(ctx, client.ObjectKey{Namespace: metav1.NamespaceDefault, Name: configName}, s)).To(Succeed()) @@ -587,8 +586,8 @@ func TestKubeadmConfigReconciler_Reconcile_ErrorIfJoiningControlPlaneHasInvalidC g.Expect(err).ToNot(HaveOccurred()) actualConfig := &bootstrapv1.KubeadmConfig{} g.Expect(myclient.Get(ctx, client.ObjectKey{Namespace: controlPlaneJoinConfig.Namespace, Name: controlPlaneJoinConfig.Name}, actualConfig)).To(Succeed()) - assertHasTrueCondition(g, myclient, request, bootstrapv1.DataSecretAvailableV1Beta1Condition) - assertHasTrueCondition(g, myclient, request, bootstrapv1.CertificatesAvailableV1Beta1Condition) + assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition) + assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigCertificatesAvailableCondition) } // If there is no APIEndpoint but everything is ready then requeue in hopes of a new APIEndpoint showing up eventually. @@ -636,9 +635,8 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp g.Expect(myclient.Get(ctx, client.ObjectKey{Namespace: workerJoinConfig.Namespace, Name: workerJoinConfig.Name}, actualConfig)).To(Succeed()) // At this point the DataSecretAvailableCondition should not be set. CertificatesAvailableCondition should be true. 
- // TODO (v1beta2): test for v1beta2 conditions - g.Expect(v1beta1conditions.Get(actualConfig, bootstrapv1.DataSecretAvailableV1Beta1Condition)).To(BeNil()) - assertHasTrueCondition(g, myclient, request, bootstrapv1.CertificatesAvailableV1Beta1Condition) + g.Expect(conditions.Get(actualConfig, bootstrapv1.KubeadmConfigDataSecretAvailableCondition)).To(BeNil()) + assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigCertificatesAvailableCondition) } func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsReady(t *testing.T) { @@ -717,7 +715,7 @@ func TestReconcileIfJoinCertificatesAvailableConditioninNodesAndControlPlaneIsRe g.Expect(cfg.Status.Initialization.DataSecretCreated).To(BeTrue()) g.Expect(cfg.Status.DataSecretName).NotTo(BeNil()) g.Expect(cfg.Status.ObservedGeneration).NotTo(BeNil()) - assertHasTrueCondition(g, myclient, request, bootstrapv1.DataSecretAvailableV1Beta1Condition) + assertHasTrueCondition(g, myclient, request, bootstrapv1.KubeadmConfigDataSecretAvailableCondition) l := &corev1.SecretList{} err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem))) @@ -2728,7 +2726,7 @@ func (m *myInitLocker) Unlock(_ context.Context, _ *clusterv1.Cluster) bool { return true } -func assertHasFalseCondition(g *WithT, myclient client.Client, req ctrl.Request, t clusterv1.ConditionType, s clusterv1.ConditionSeverity, r string) { +func assertHasFalseCondition(g *WithT, myclient client.Client, req ctrl.Request, conditionType string, reason string) { config := &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: req.Name, @@ -2738,15 +2736,13 @@ func assertHasFalseCondition(g *WithT, myclient client.Client, req ctrl.Request, configKey := client.ObjectKeyFromObject(config) g.Expect(myclient.Get(ctx, configKey, config)).To(Succeed()) - // TODO (v1beta2): test for v1beta2 conditions - c := v1beta1conditions.Get(config, t) + c := conditions.Get(config, conditionType) g.Expect(c).ToNot(BeNil()) - g.Expect(c.Status).To(Equal(corev1.ConditionFalse)) - g.Expect(c.Severity).To(Equal(s)) - g.Expect(c.Reason).To(Equal(r)) + g.Expect(c.Status).To(Equal(metav1.ConditionFalse)) + g.Expect(c.Reason).To(Equal(reason)) } -func assertHasTrueCondition(g *WithT, myclient client.Client, req ctrl.Request, t clusterv1.ConditionType) { +func assertHasTrueCondition(g *WithT, myclient client.Client, req ctrl.Request, conditionType string) { config := &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ Name: req.Name, @@ -2755,10 +2751,9 @@ func assertHasTrueCondition(g *WithT, myclient client.Client, req ctrl.Request, } configKey := client.ObjectKeyFromObject(config) g.Expect(myclient.Get(ctx, configKey, config)).To(Succeed()) - // TODO (v1beta2): test for v1beta2 conditions - c := v1beta1conditions.Get(config, t) + c := conditions.Get(config, conditionType) g.Expect(c).ToNot(BeNil()) - g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) + g.Expect(c.Status).To(Equal(metav1.ConditionTrue)) } func TestKubeadmConfigReconciler_Reconcile_v1beta2_conditions(t *testing.T) { From ef3d0d243cf16a50a562cb6bbb033885b35f9fa8 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Tue, 22 Apr 2025 21:23:47 +0200 Subject: [PATCH 14/20] Stop using controlplanev1.Machine*HealthyV1Beta1Condition in controllers --- .../kubeadm/internal/control_plane.go | 24 ++-- .../kubeadm/internal/control_plane_test.go | 32 ++---- .../internal/controllers/controller_test.go | 11 +- .../internal/controllers/remediation.go | 20 ++--
.../internal/controllers/remediation_test.go | 8 +- .../kubeadm/internal/controllers/scale.go | 32 +++--- .../internal/controllers/scale_test.go | 85 ++++++--------- .../internal/workload_cluster_conditions.go | 103 +++++++++--------- util/collections/machine_filters.go | 16 ++- util/collections/machine_filters_test.go | 83 +++++--------- 10 files changed, 176 insertions(+), 238 deletions(-) diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index 5fe4b4f44ace..4d80c9d8362d 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -35,7 +35,7 @@ import ( controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" "sigs.k8s.io/cluster-api/util/collections" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/failuredomains" "sigs.k8s.io/cluster-api/util/patch" ) @@ -399,16 +399,15 @@ func (c *ControlPlane) InjectTestManagementCluster(managementCluster ManagementC // // - etcdMembers list as reported by etcd. func (c *ControlPlane) StatusToLogKeyAndValues(newMachine, deletedMachine *clusterv1.Machine) []any { - // TODO (v1beta2) switch to v1beta2 conditions - controlPlaneMachineHealthConditions := []clusterv1.ConditionType{ - controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition, - controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition, - controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition, + controlPlaneMachineHealthConditions := []string{ + controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, + controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, + controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, } if c.IsEtcdManaged() { controlPlaneMachineHealthConditions = append(controlPlaneMachineHealthConditions, - controlplanev1.MachineEtcdPodHealthyV1Beta1Condition, - controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition, + controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, + controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, ) } @@ -424,13 +423,12 @@ func (c *ControlPlane) StatusToLogKeyAndValues(newMachine, deletedMachine *clust notes = append(notes, "marked for remediation") } - // TODO (v1beta2): test for v1beta2 conditions for _, condition := range controlPlaneMachineHealthConditions { - if v1beta1conditions.IsUnknown(m, condition) { - notes = append(notes, strings.Replace(string(condition), "Healthy", " health unknown", -1)) + if conditions.IsUnknown(m, condition) { + notes = append(notes, strings.Replace(condition, "Healthy", " health unknown", -1)) } - if v1beta1conditions.IsFalse(m, condition) { - notes = append(notes, strings.Replace(string(condition), "Healthy", " not healthy", -1)) + if conditions.IsFalse(m, condition) { + notes = append(notes, strings.Replace(condition, "Healthy", " not healthy", -1)) } } diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index ef93ca97131a..30a39d455655 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ b/controlplane/kubeadm/internal/control_plane_test.go @@ -293,16 +293,12 @@ func TestStatusToLogKeyAndValues(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "healthy"}, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{Name: "healthy-node"}, 
- Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - {Type: controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition, Status: corev1.ConditionTrue}, - {Type: controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition, Status: corev1.ConditionTrue}, - {Type: controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition, Status: corev1.ConditionTrue}, - {Type: controlplanev1.MachineEtcdPodHealthyV1Beta1Condition, Status: corev1.ConditionTrue}, - {Type: controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition, Status: corev1.ConditionTrue}, - }, - }, + Conditions: []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionTrue}, }, }, } @@ -311,16 +307,12 @@ func TestStatusToLogKeyAndValues(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "without-node"}, Status: clusterv1.MachineStatus{ NodeRef: nil, - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: []clusterv1.Condition{ - {Type: controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition, Status: corev1.ConditionUnknown}, - {Type: controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition, Status: corev1.ConditionUnknown}, - {Type: controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition, Status: corev1.ConditionUnknown}, - {Type: controlplanev1.MachineEtcdPodHealthyV1Beta1Condition, Status: corev1.ConditionUnknown}, - {Type: controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition, Status: corev1.ConditionFalse}, // not a real use case, but used to test a code branch. - }, - }, + Conditions: []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionUnknown}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionUnknown}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionUnknown}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, Status: metav1.ConditionUnknown}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionFalse}, // not a real use case, but used to test a code branch. 
}, }, } diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 29cb2fdad346..1d4040230922 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -3918,12 +3918,11 @@ func setMachineHealthy(m *clusterv1.Machine) { Kind: "Node", Name: "node-1", } - // TODO (v1beta2):use v1beta2 conditions - v1beta1conditions.MarkTrue(m, controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition) - v1beta1conditions.MarkTrue(m, controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition) - v1beta1conditions.MarkTrue(m, controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition) - v1beta1conditions.MarkTrue(m, controlplanev1.MachineEtcdPodHealthyV1Beta1Condition) - v1beta1conditions.MarkTrue(m, controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition) + conditions.Set(m, metav1.Condition{Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionTrue}) + conditions.Set(m, metav1.Condition{Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionTrue}) + conditions.Set(m, metav1.Condition{Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionTrue}) + conditions.Set(m, metav1.Condition{Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, Status: metav1.ConditionTrue}) + conditions.Set(m, metav1.Condition{Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionTrue}) } // newCluster return a CAPI cluster object. diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go index 6fb05274754f..c7f7e5d7f3b4 100644 --- a/controlplane/kubeadm/internal/controllers/remediation.go +++ b/controlplane/kubeadm/internal/controllers/remediation.go @@ -420,10 +420,10 @@ func pickMachineToBeRemediated(i, j *clusterv1.Machine, isEtcdManaged bool) bool // if one machine has unhealthy etcd member or pod, remediate first. if isEtcdManaged { - if p := pickMachineToBeRemediatedByConditionState(i, j, controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition); p != nil { + if p := pickMachineToBeRemediatedByConditionState(i, j, controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition); p != nil { return *p } - if p := pickMachineToBeRemediatedByConditionState(i, j, controlplanev1.MachineEtcdPodHealthyV1Beta1Condition); p != nil { + if p := pickMachineToBeRemediatedByConditionState(i, j, controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition); p != nil { return *p } @@ -432,13 +432,13 @@ func pickMachineToBeRemediated(i, j *clusterv1.Machine, isEtcdManaged bool) bool } // if one machine has unhealthy control plane component, remediate first. 
- if p := pickMachineToBeRemediatedByConditionState(i, j, controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition); p != nil { + if p := pickMachineToBeRemediatedByConditionState(i, j, controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition); p != nil { return *p } - if p := pickMachineToBeRemediatedByConditionState(i, j, controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition); p != nil { + if p := pickMachineToBeRemediatedByConditionState(i, j, controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition); p != nil { return *p } - if p := pickMachineToBeRemediatedByConditionState(i, j, controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition); p != nil { + if p := pickMachineToBeRemediatedByConditionState(i, j, controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition); p != nil { return *p } @@ -451,10 +451,9 @@ func pickMachineToBeRemediated(i, j *clusterv1.Machine, isEtcdManaged bool) bool // pickMachineToBeRemediatedByConditionState returns true if condition t report issue on machine i and not on machine j, // false if the vice-versa apply, or nil if condition t doesn't provide a discriminating criteria for picking one machine or another for remediation. -func pickMachineToBeRemediatedByConditionState(i, j *clusterv1.Machine, t clusterv1.ConditionType) *bool { - // TODO (v1beta2): test for v1beta2 conditions - iCondition := v1beta1conditions.IsTrue(i, t) - jCondition := v1beta1conditions.IsTrue(j, t) +func pickMachineToBeRemediatedByConditionState(i, j *clusterv1.Machine, conditionType string) *bool { + iCondition := conditions.IsTrue(i, conditionType) + jCondition := conditions.IsTrue(j, conditionType) if !iCondition && jCondition { return ptr.To(true) @@ -641,8 +640,7 @@ func (r *KubeadmControlPlaneReconciler) canSafelyRemoveEtcdMember(ctx context.Co } // Check member health as reported by machine's health conditions - // TODO (v1beta2): test for v1beta2 conditions - if !v1beta1conditions.IsTrue(machine, controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition) { + if !conditions.IsTrue(machine, controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition) { targetUnhealthyMembers++ unhealthyMembers = append(unhealthyMembers, fmt.Sprintf("%s (%s)", etcdMember, machine.Name)) continue diff --git a/controlplane/kubeadm/internal/controllers/remediation_test.go b/controlplane/kubeadm/internal/controllers/remediation_test.go index fcb8571519c2..a5bd10a606ea 100644 --- a/controlplane/kubeadm/internal/controllers/remediation_test.go +++ b/controlplane/kubeadm/internal/controllers/remediation_test.go @@ -2107,25 +2107,25 @@ func withStuckRemediation() machineOption { func withHealthyEtcdMember() machineOption { return func(machine *clusterv1.Machine) { - v1beta1conditions.MarkTrue(machine, controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition) + conditions.Set(machine, metav1.Condition{Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyReason}) } } func withUnhealthyEtcdMember() machineOption { return func(machine *clusterv1.Machine) { - v1beta1conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition, controlplanev1.EtcdMemberUnhealthyV1Beta1Reason, clusterv1.ConditionSeverityError, "") + conditions.Set(machine, metav1.Condition{Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionFalse, Reason: 
controlplanev1.KubeadmControlPlaneMachineEtcdMemberNotHealthyReason}) } } func withHealthyAPIServerPod() machineOption { return func(machine *clusterv1.Machine) { - v1beta1conditions.MarkTrue(machine, controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition) + conditions.Set(machine, metav1.Condition{Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningReason}) } } func withUnhealthyAPIServerPod() machineOption { return func(machine *clusterv1.Machine) { - v1beta1conditions.MarkFalse(machine, controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition, controlplanev1.ControlPlaneComponentsUnhealthyV1Beta1Reason, clusterv1.ConditionSeverityError, "") + conditions.Set(machine, metav1.Condition{Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningReason}) } } diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index b65192f4dcfb..9ae3d0a7bfed 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -24,6 +24,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" @@ -33,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/collections" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/version" ) @@ -203,15 +204,15 @@ func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, con } // Check machine health conditions; if there are conditions with False or Unknown, then wait. 
- allMachineHealthConditions := []clusterv1.ConditionType{ - controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition, - controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition, - controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition, + allMachineHealthConditions := []string{ + controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, + controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, + controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, } if controlPlane.IsEtcdManaged() { allMachineHealthConditions = append(allMachineHealthConditions, - controlplanev1.MachineEtcdPodHealthyV1Beta1Condition, - controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition, + controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, + controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, ) } machineErrors := []error{} @@ -240,7 +241,7 @@ loopmachines: } else { for _, condition := range allMachineHealthConditions { if err := preflightCheckCondition("Machine", machine, condition); err != nil { - if condition == controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition { + if condition == controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition { controlPlane.PreflightCheckResults.EtcdClusterNotHealthy = true } else { controlPlane.PreflightCheckResults.ControlPlaneComponentsNotHealthy = true @@ -262,17 +263,16 @@ loopmachines: return ctrl.Result{}, nil } -func preflightCheckCondition(kind string, obj v1beta1conditions.Getter, condition clusterv1.ConditionType) error { - // TODO (v1beta2): test for v1beta2 conditions - c := v1beta1conditions.Get(obj, condition) +func preflightCheckCondition(kind string, obj *clusterv1.Machine, conditionType string) error { + c := conditions.Get(obj, conditionType) if c == nil { - return errors.Errorf("%s %s does not have %s condition", kind, obj.GetName(), condition) + return errors.Errorf("%s %s does not have %s condition", kind, obj.GetName(), conditionType) } - if c.Status == corev1.ConditionFalse { - return errors.Errorf("%s %s reports %s condition is false (%s, %s)", kind, obj.GetName(), condition, c.Severity, c.Message) + if c.Status == metav1.ConditionFalse { + return errors.Errorf("%s %s reports %s condition is false (%s)", kind, obj.GetName(), conditionType, c.Message) } - if c.Status == corev1.ConditionUnknown { - return errors.Errorf("%s %s reports %s condition is unknown (%s)", kind, obj.GetName(), condition, c.Message) + if c.Status == metav1.ConditionUnknown { + return errors.Errorf("%s %s reports %s condition is unknown (%s)", kind, obj.GetName(), conditionType, c.Message) } return nil } diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index e3943defc361..f5d01b8f338f 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -38,7 +38,6 @@ import ( "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) func TestKubeadmControlPlaneReconciler_initializeControlPlane(t *testing.T) { @@ -611,15 +610,13 @@ func TestPreflightChecks(t *testing.T) { Kind: "Node", Name: "node-1", }, - Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - 
*v1beta1conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition, "fooReason", clusterv1.ConditionSeverityError, ""), - *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition), - }, - }}, + Conditions: []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionFalse}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionTrue}, + }, }, }, }, @@ -641,15 +638,13 @@ func TestPreflightChecks(t *testing.T) { Kind: "Node", Name: "node-1", }, - Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyV1Beta1Condition), - *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition, "fooReason", clusterv1.ConditionSeverityError, ""), - }, - }}, + Conditions: []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionFalse}, + }, }, }, }, @@ -665,12 +660,10 @@ func TestPreflightChecks(t *testing.T) { name: "control plane with an healthy machine and an healthy kcp condition should pass", kcp: &controlplanev1.KubeadmControlPlane{ Status: controlplanev1.KubeadmControlPlaneStatus{ - Deprecated: &controlplanev1.KubeadmControlPlaneDeprecatedStatus{V1Beta1: &controlplanev1.KubeadmControlPlaneV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.EtcdClusterHealthyV1Beta1Condition), - }, - }}, + Conditions: []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyCondition, Status: metav1.ConditionTrue}, + }, }, }, machines: []*clusterv1.Machine{ @@ -680,15 +673,13 @@ func TestPreflightChecks(t *testing.T) { Kind: "Node", Name: "node-1", }, - Deprecated: 
&clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition), - }, - }}, + Conditions: []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionTrue}, + }, }, }, }, @@ -727,7 +718,7 @@ func TestPreflightChecks(t *testing.T) { } func TestPreflightCheckCondition(t *testing.T) { - condition := clusterv1.ConditionType("fooCondition") + condition := "fooCondition" testCases := []struct { name string machine *clusterv1.Machine @@ -742,11 +733,9 @@ func TestPreflightCheckCondition(t *testing.T) { name: "false condition should return error", machine: &clusterv1.Machine{ Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.FalseCondition(condition, "fooReason", clusterv1.ConditionSeverityError, ""), - }, - }}, + Conditions: []metav1.Condition{ + {Type: condition, Status: metav1.ConditionFalse}, + }, }, }, expectErr: true, @@ -755,11 +744,9 @@ func TestPreflightCheckCondition(t *testing.T) { name: "unknown condition should return error", machine: &clusterv1.Machine{ Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.UnknownCondition(condition, "fooReason", ""), - }, - }}, + Conditions: []metav1.Condition{ + {Type: condition, Status: metav1.ConditionUnknown}, + }, }, }, expectErr: true, @@ -768,11 +755,9 @@ func TestPreflightCheckCondition(t *testing.T) { name: "true condition should not return error", machine: &clusterv1.Machine{ Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(condition), - }, - }}, + Conditions: []metav1.Condition{ + {Type: condition, Status: metav1.ConditionTrue}, + }, }, }, expectErr: false, diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions.go b/controlplane/kubeadm/internal/workload_cluster_conditions.go index 71811e11d252..64ebdc92baf4 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions.go @@ -329,7 +329,7 @@ func (w *Workload) getCurrentEtcdMembersAndAlarms(ctx context.Context, machines func getNodeNamesSortedByLastKnownEtcdHealth(nodes *corev1.NodeList, machines collections.Machines) []string { // Get the list of 
nodes and the corresponding MachineEtcdMemberHealthyCondition eligibleNodes := sets.Set[string]{} - nodeEtcdHealthyCondition := map[string]clusterv1.Condition{} + nodeEtcdHealthyCondition := map[string]metav1.Condition{} for _, node := range nodes.Items { var machine *clusterv1.Machine @@ -345,14 +345,13 @@ func getNodeNamesSortedByLastKnownEtcdHealth(nodes *corev1.NodeList, machines co } eligibleNodes.Insert(node.Name) - // TODO (v1beta2): test for v1beta2 conditions - if c := v1beta1conditions.Get(machine, controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition); c != nil { + if c := conditions.Get(machine, controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition); c != nil { nodeEtcdHealthyCondition[node.Name] = *c continue } - nodeEtcdHealthyCondition[node.Name] = clusterv1.Condition{ - Type: controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition, - Status: corev1.ConditionUnknown, + nodeEtcdHealthyCondition[node.Name] = metav1.Condition{ + Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, + Status: metav1.ConditionUnknown, } } @@ -364,10 +363,10 @@ func getNodeNamesSortedByLastKnownEtcdHealth(nodes *corev1.NodeList, machines co // Nodes with last known etcd healthy members goes first, because most likely we can connect to them again. // NOTE: This isn't always true, it is a best effort assumption (e.g. kubelet might have issues preventing connection to an healthy member to be established). - if iCondition.Status == corev1.ConditionTrue && jCondition.Status != corev1.ConditionTrue { + if iCondition.Status == metav1.ConditionTrue && jCondition.Status != metav1.ConditionTrue { return true } - if iCondition.Status != corev1.ConditionTrue && jCondition.Status == corev1.ConditionTrue { + if iCondition.Status != metav1.ConditionTrue && jCondition.Status == metav1.ConditionTrue { return false } @@ -472,22 +471,22 @@ func compareMachinesAndMembers(controlPlane *ControlPlane, nodes *corev1.NodeLis // components running in a static pod generated by kubeadm. This operation is best effort, in the sense that in case // of problems in retrieving the pod status, it sets the condition to Unknown state without returning any error. 
func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane *ControlPlane) { - allMachinePodConditions := []clusterv1.ConditionType{ + allMachinePodV1Beta1Conditions := []clusterv1.ConditionType{ controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition, controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition, controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition, } if controlPlane.IsEtcdManaged() { - allMachinePodConditions = append(allMachinePodConditions, controlplanev1.MachineEtcdPodHealthyV1Beta1Condition) + allMachinePodV1Beta1Conditions = append(allMachinePodV1Beta1Conditions, controlplanev1.MachineEtcdPodHealthyV1Beta1Condition) } - allMachinePodV1beta2Conditions := []string{ + allMachinePodConditions := []string{ controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, } if controlPlane.IsEtcdManaged() { - allMachinePodV1beta2Conditions = append(allMachinePodV1beta2Conditions, controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition) + allMachinePodConditions = append(allMachinePodConditions, controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition) } // NOTE: this fun uses control plane nodes from the workload cluster as a source of truth for the current state. @@ -495,11 +494,11 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * if err != nil { for i := range controlPlane.Machines { machine := controlPlane.Machines[i] - for _, condition := range allMachinePodConditions { + for _, condition := range allMachinePodV1Beta1Conditions { v1beta1conditions.MarkUnknown(machine, condition, controlplanev1.PodInspectionFailedV1Beta1Reason, "Failed to get the Node which is hosting this component: %v", err) } - for _, condition := range allMachinePodV1beta2Conditions { + for _, condition := range allMachinePodConditions { conditions.Set(machine, metav1.Condition{ Type: condition, Status: metav1.ConditionUnknown, @@ -525,7 +524,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * provisioningMachines := controlPlane.Machines.Filter(collections.Not(collections.HasNode())) for _, machine := range provisioningMachines { - for _, condition := range allMachinePodV1beta2Conditions { + for _, condition := range allMachinePodConditions { var msg string if ptr.Deref(machine.Spec.ProviderID, "") != "" { // If the machine is at the end of the provisioning phase, with ProviderID set, but still waiting @@ -567,11 +566,11 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * // If the machine is deleting, report all the conditions as deleting if !machine.ObjectMeta.DeletionTimestamp.IsZero() { - for _, condition := range allMachinePodConditions { + for _, condition := range allMachinePodV1Beta1Conditions { v1beta1conditions.MarkFalse(machine, condition, clusterv1.DeletingV1Beta1Reason, clusterv1.ConditionSeverityInfo, "") } - for _, condition := range allMachinePodV1beta2Conditions { + for _, condition := range allMachinePodConditions { conditions.Set(machine, metav1.Condition{ Type: condition, Status: metav1.ConditionFalse, @@ -586,11 +585,11 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * if nodeHasUnreachableTaint(node) { // NOTE: We are assuming unreachable as a temporary condition, leaving to MHC // the responsibility to determine if the node is unhealthy or not. 
- for _, condition := range allMachinePodConditions { + for _, condition := range allMachinePodV1Beta1Conditions { v1beta1conditions.MarkUnknown(machine, condition, controlplanev1.PodInspectionFailedV1Beta1Reason, "Node is unreachable") } - for _, condition := range allMachinePodV1beta2Conditions { + for _, condition := range allMachinePodConditions { conditions.Set(machine, metav1.Condition{ Type: condition, Status: metav1.ConditionUnknown, @@ -624,11 +623,11 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * } } if !found { - for _, condition := range allMachinePodConditions { + for _, condition := range allMachinePodV1Beta1Conditions { v1beta1conditions.MarkFalse(machine, condition, controlplanev1.PodFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "Missing Node") } - for _, condition := range allMachinePodV1beta2Conditions { + for _, condition := range allMachinePodConditions { conditions.Set(machine, metav1.Condition{ Type: condition, Status: metav1.ConditionUnknown, @@ -642,7 +641,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * // Aggregate components error from machines at KCP level. aggregateV1Beta1ConditionsFromMachinesToKCP(aggregateV1Beta1ConditionsFromMachinesToKCPInput{ controlPlane: controlPlane, - machineConditions: allMachinePodConditions, + machineConditions: allMachinePodV1Beta1Conditions, kcpErrors: kcpErrors, condition: controlplanev1.ControlPlaneComponentsHealthyV1Beta1Condition, unhealthyReason: controlplanev1.ControlPlaneComponentsUnhealthyV1Beta1Reason, @@ -652,7 +651,7 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * aggregateConditionsFromMachinesToKCP(aggregateConditionsFromMachinesToKCPInput{ controlPlane: controlPlane, - machineConditions: allMachinePodV1beta2Conditions, + machineConditions: allMachinePodConditions, kcpErrors: kcpErrors, condition: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyCondition, falseReason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsNotHealthyReason, @@ -675,16 +674,16 @@ func nodeHasUnreachableTaint(node corev1.Node) bool { // updateStaticPodCondition is responsible for updating machine conditions reflecting the status of a component running // in a static pod generated by kubeadm. This operation is best effort, in the sense that in case of problems // in retrieving the pod status, it sets the condition to Unknown state without returning any error. -func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *clusterv1.Machine, node corev1.Node, component string, staticPodCondition clusterv1.ConditionType, staticPodV1beta2Condition string) { +func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *clusterv1.Machine, node corev1.Node, component string, staticPodV1Beta1Condition clusterv1.ConditionType, staticPodCondition string) { log := ctrl.LoggerFrom(ctx) // If node ready is unknown there is a good chance that kubelet is not updating mirror pods, so we consider pod status // to be unknown as well without further investigations. 
if nodeReadyUnknown(node) { - v1beta1conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedV1Beta1Reason, "Node Ready condition is Unknown, Pod data might be stale") + v1beta1conditions.MarkUnknown(machine, staticPodV1Beta1Condition, controlplanev1.PodInspectionFailedV1Beta1Reason, "Node Ready condition is Unknown, Pod data might be stale") conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedReason, Message: "Node Ready condition is Unknown, Pod data might be stale", @@ -701,20 +700,20 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste if err := w.Client.Get(ctx, podKey, &pod); err != nil { // If there is an error getting the Pod, do not set any conditions. if apierrors.IsNotFound(err) { - v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodMissingV1Beta1Reason, clusterv1.ConditionSeverityError, "Pod %s is missing", podKey.Name) + v1beta1conditions.MarkFalse(machine, staticPodV1Beta1Condition, controlplanev1.PodMissingV1Beta1Reason, clusterv1.ConditionSeverityError, "Pod %s is missing", podKey.Name) conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodDoesNotExistReason, Message: "Pod does not exist", }) return } - v1beta1conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedV1Beta1Reason, "Failed to get Pod status") + v1beta1conditions.MarkUnknown(machine, staticPodV1Beta1Condition, controlplanev1.PodInspectionFailedV1Beta1Reason, "Failed to get Pod status") conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedReason, Message: "Please check controller logs for errors", @@ -732,10 +731,10 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // Check if the container is still to be scheduled // NOTE: This should never happen for static pods, however this check is implemented for completeness. if podCondition(pod, corev1.PodScheduled) != corev1.ConditionTrue { - v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningV1Beta1Reason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled") + v1beta1conditions.MarkFalse(machine, staticPodV1Beta1Condition, controlplanev1.PodProvisioningV1Beta1Reason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled") conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningReason, Message: "Waiting to be scheduled", @@ -746,10 +745,10 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // Check if the container is still running init containers // NOTE: As of today there are not init containers in static pods generated by kubeadm, however this check is implemented for completeness. 
if podCondition(pod, corev1.PodInitialized) != corev1.ConditionTrue { - v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningV1Beta1Reason, clusterv1.ConditionSeverityInfo, "Running init containers") + v1beta1conditions.MarkFalse(machine, staticPodV1Beta1Condition, controlplanev1.PodProvisioningV1Beta1Reason, clusterv1.ConditionSeverityInfo, "Running init containers") conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningReason, Message: "Running init containers", @@ -758,10 +757,10 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste } // If there are no error from containers, report provisioning without further details. - v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningV1Beta1Reason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machine, staticPodV1Beta1Condition, controlplanev1.PodProvisioningV1Beta1Reason, clusterv1.ConditionSeverityInfo, "") conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningReason, }) @@ -773,10 +772,10 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // PodReady condition means the pod is able to service requests if podCondition(pod, corev1.PodReady) == corev1.ConditionTrue { - v1beta1conditions.MarkTrue(machine, staticPodCondition) + v1beta1conditions.MarkTrue(machine, staticPodV1Beta1Condition) conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneMachinePodRunningReason, }) @@ -798,10 +797,10 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste } if len(containerWaitingMessages) > 0 { if terminatedWithError { - v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedV1Beta1Reason, clusterv1.ConditionSeverityError, strings.Join(containerWaitingMessages, ", ")) + v1beta1conditions.MarkFalse(machine, staticPodV1Beta1Condition, controlplanev1.PodFailedV1Beta1Reason, clusterv1.ConditionSeverityError, strings.Join(containerWaitingMessages, ", ")) conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedReason, Message: strings.Join(containerWaitingMessages, ", "), @@ -810,10 +809,10 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste } // Note: Some error cases cannot be caught when container state == "Waiting", // e.g., "waiting.reason: ErrImagePull" is an error, but since LastTerminationState does not exist, this cannot be differentiated from "PodProvisioningReason" - v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningV1Beta1Reason, clusterv1.ConditionSeverityInfo, strings.Join(containerWaitingMessages, ", ")) + v1beta1conditions.MarkFalse(machine, staticPodV1Beta1Condition, controlplanev1.PodProvisioningV1Beta1Reason, clusterv1.ConditionSeverityInfo, strings.Join(containerWaitingMessages, ", ")) conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionFalse, Reason: 
controlplanev1.KubeadmControlPlaneMachinePodProvisioningReason, Message: strings.Join(containerWaitingMessages, ", "), @@ -829,10 +828,10 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste } } if len(containerTerminatedMessages) > 0 { - v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedV1Beta1Reason, clusterv1.ConditionSeverityError, strings.Join(containerTerminatedMessages, ", ")) + v1beta1conditions.MarkFalse(machine, staticPodV1Beta1Condition, controlplanev1.PodFailedV1Beta1Reason, clusterv1.ConditionSeverityError, strings.Join(containerTerminatedMessages, ", ")) conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedReason, Message: strings.Join(containerTerminatedMessages, ", "), @@ -842,10 +841,10 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // If the pod is not yet ready, most probably it is waiting for startup or readiness probes. // Report this as part of the provisioning process because the corresponding control plane component is not ready yet. - v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodProvisioningV1Beta1Reason, clusterv1.ConditionSeverityInfo, "Waiting for startup or readiness probes") + v1beta1conditions.MarkFalse(machine, staticPodV1Beta1Condition, controlplanev1.PodProvisioningV1Beta1Reason, clusterv1.ConditionSeverityInfo, "Waiting for startup or readiness probes") conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodProvisioningReason, Message: "Waiting for startup or readiness probes", @@ -854,10 +853,10 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // PodSucceeded means that all containers in the pod have voluntarily terminated // with a container exit code of 0, and the system is not going to restart any of these containers. // NOTE: This should never happen for the static pods running control plane components. - v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "All the containers have been terminated") + v1beta1conditions.MarkFalse(machine, staticPodV1Beta1Condition, controlplanev1.PodFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "All the containers have been terminated") conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedReason, Message: "All the containers have been terminated", @@ -866,10 +865,10 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste // PodFailed means that all containers in the pod have terminated, and at least one container has // terminated in a failure (exited with a non-zero exit code or was stopped by the system). // NOTE: This should never happen for the static pods running control plane components. 
- v1beta1conditions.MarkFalse(machine, staticPodCondition, controlplanev1.PodFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "All the containers have been terminated") + v1beta1conditions.MarkFalse(machine, staticPodV1Beta1Condition, controlplanev1.PodFailedV1Beta1Reason, clusterv1.ConditionSeverityError, "All the containers have been terminated") conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachinePodFailedReason, Message: "All the containers have been terminated", @@ -877,10 +876,10 @@ func (w *Workload) updateStaticPodCondition(ctx context.Context, machine *cluste case corev1.PodUnknown: // PodUnknown means that for some reason the state of the pod could not be obtained, typically due // to an error in communicating with the host of the pod. - v1beta1conditions.MarkUnknown(machine, staticPodCondition, controlplanev1.PodInspectionFailedV1Beta1Reason, "Pod is reporting Unknown status") + v1beta1conditions.MarkUnknown(machine, staticPodV1Beta1Condition, controlplanev1.PodInspectionFailedV1Beta1Reason, "Pod is reporting Unknown status") conditions.Set(machine, metav1.Condition{ - Type: staticPodV1beta2Condition, + Type: staticPodCondition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedReason, Message: "Pod is reporting Unknown status", diff --git a/util/collections/machine_filters.go b/util/collections/machine_filters.go index 19267bad59cd..4e6ee603acdd 100644 --- a/util/collections/machine_filters.go +++ b/util/collections/machine_filters.go @@ -29,7 +29,6 @@ import ( controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) // Func is the functon definition for a filter. @@ -173,15 +172,15 @@ func IsUnhealthy(machine *clusterv1.Machine) bool { // APIServerPodHealthy, ControllerManagerPodHealthy, SchedulerPodHealthy, EtcdPodHealthy & EtcdMemberHealthy (if using managed etcd). // It is different from the HasUnhealthyCondition func which checks MachineHealthCheck conditions. func HasUnhealthyControlPlaneComponents(isEtcdManaged bool) Func { - controlPlaneMachineHealthConditions := []clusterv1.ConditionType{ - controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition, - controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition, - controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition, + controlPlaneMachineHealthConditions := []string{ + controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, + controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, + controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, } if isEtcdManaged { controlPlaneMachineHealthConditions = append(controlPlaneMachineHealthConditions, - controlplanev1.MachineEtcdPodHealthyV1Beta1Condition, - controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition, + controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, + controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, ) } return func(machine *clusterv1.Machine) bool { @@ -196,8 +195,7 @@ func HasUnhealthyControlPlaneComponents(isEtcdManaged bool) Func { // Do not return true when the condition is not set or is set to Unknown because // it means a transient state and can not be considered as unhealthy. 
// preflightCheckCondition() can cover these two cases and skip the scaling up/down. - // TODO (v1beta2): test for v1beta2 conditions - if v1beta1conditions.IsFalse(machine, condition) { + if conditions.IsFalse(machine, condition) { return true } } diff --git a/util/collections/machine_filters_test.go b/util/collections/machine_filters_test.go index d346813980d5..ed7a63edcba9 100644 --- a/util/collections/machine_filters_test.go +++ b/util/collections/machine_filters_test.go @@ -30,7 +30,6 @@ import ( controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" - v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) func falseFilter(_ *clusterv1.Machine) bool { @@ -470,15 +469,10 @@ func TestHasUnhealthyControlPlaneComponentCondition(t *testing.T) { machine.Status.NodeRef = &corev1.ObjectReference{ Name: "node1", } - // TODO (v1beta2) Use new conditions - machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition), - }, - }, + machine.Status.Conditions = []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionTrue}, } g.Expect(collections.HasUnhealthyControlPlaneComponents(false)(machine)).To(BeFalse()) }) @@ -489,16 +483,10 @@ func TestHasUnhealthyControlPlaneComponentCondition(t *testing.T) { machine.Status.NodeRef = &corev1.ObjectReference{ Name: "node1", } - // TODO (v1beta2) Use new conditions - machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition), - *v1beta1conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition, "", - clusterv1.ConditionSeverityWarning, ""), - }, - }, + machine.Status.Conditions = []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionFalse}, } g.Expect(collections.HasUnhealthyControlPlaneComponents(false)(machine)).To(BeTrue()) }) @@ -509,19 +497,12 @@ func TestHasUnhealthyControlPlaneComponentCondition(t *testing.T) { machine.Status.NodeRef = &corev1.ObjectReference{ Name: "node1", } - // TODO (v1beta2) Use new conditions - machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - 
*v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition), - *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyV1Beta1Condition, "", - clusterv1.ConditionSeverityWarning, ""), - *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition, "", - clusterv1.ConditionSeverityWarning, ""), - }, - }, + machine.Status.Conditions = []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, Status: metav1.ConditionFalse}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionFalse}, } g.Expect(collections.HasUnhealthyControlPlaneComponents(false)(machine)).To(BeFalse()) }) @@ -532,19 +513,12 @@ func TestHasUnhealthyControlPlaneComponentCondition(t *testing.T) { machine.Status.NodeRef = &corev1.ObjectReference{ Name: "node1", } - // TODO (v1beta2) Use new conditions - machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition), - *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyV1Beta1Condition, "", - clusterv1.ConditionSeverityWarning, ""), - *v1beta1conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition, "", - clusterv1.ConditionSeverityWarning, ""), - }, - }, + machine.Status.Conditions = []metav1.Condition{ + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionTrue}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, Status: metav1.ConditionFalse}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionFalse}, } g.Expect(collections.HasUnhealthyControlPlaneComponents(true)(machine)).To(BeTrue()) }) @@ -555,17 +529,12 @@ func TestHasUnhealthyControlPlaneComponentCondition(t *testing.T) { machine.Status.NodeRef = &corev1.ObjectReference{ Name: "node1", } - // TODO (v1beta2) Use new conditions - machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - *v1beta1conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyV1Beta1Condition), - *v1beta1conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyV1Beta1Condition), - 
*v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyV1Beta1Condition),
-					*v1beta1conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyV1Beta1Condition),
-				},
-			},
+		machine.Status.Conditions = []metav1.Condition{
+			{Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyCondition, Status: metav1.ConditionTrue},
+			{Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyCondition, Status: metav1.ConditionTrue},
+			{Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyCondition, Status: metav1.ConditionTrue},
+			{Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyCondition, Status: metav1.ConditionTrue},
+			{Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyCondition, Status: metav1.ConditionTrue},
 		}
 		g.Expect(collections.HasUnhealthyControlPlaneComponents(true)(machine)).To(BeFalse())
 	})

From c9c604d954fc0dc02d8e1a2ad58e5bc3db0fd95e Mon Sep 17 00:00:00 2001
From: fabriziopandini
Date: Tue, 22 Apr 2025 21:36:27 +0200
Subject: [PATCH 15/20] Stop using controlplanev1.AvailableV1Beta1Condition in controllers

---
 .../internal/controllers/controller.go      |  5 ++--
 .../internal/controllers/controller_test.go | 24 -------------------
 2 files changed, 2 insertions(+), 27 deletions(-)

diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go
index 12b96dd35414..5926a0859a7c 100644
--- a/controlplane/kubeadm/internal/controllers/controller.go
+++ b/controlplane/kubeadm/internal/controllers/controller.go
@@ -893,10 +893,9 @@ func (r *KubeadmControlPlaneReconciler) reconcileControlPlaneAndMachinesConditio
 	// Note: The Machine controller uses the ControlPlaneInitialized condition on the Cluster instead for
 	// the same check. We don't use the ControlPlaneInitialized condition from the Cluster here because KCP
 	// Reconcile does (currently) not get triggered from condition changes to the Cluster object.
-	// TODO (v1beta2): Once we moved to v1beta2 conditions we should use the `Initialized` condition instead.
-	controlPlaneInitialized := v1beta1conditions.Get(controlPlane.KCP, controlplanev1.AvailableV1Beta1Condition)
+	controlPlaneInitialized := conditions.Get(controlPlane.KCP, controlplanev1.KubeadmControlPlaneInitializedCondition)
 	if controlPlane.KCP.Status.Initialization == nil || !controlPlane.KCP.Status.Initialization.ControlPlaneInitialized ||
-		controlPlaneInitialized == nil || controlPlaneInitialized.Status != corev1.ConditionTrue {
+		controlPlaneInitialized == nil || controlPlaneInitialized.Status != metav1.ConditionTrue {
 		// Overwrite conditions to InspectionFailed.
setConditionsToUnknown(setConditionsToUnknownInput{ ControlPlane: controlPlane, diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 1d4040230922..d8425341ceeb 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -2132,14 +2132,6 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition Initialization: &controlplanev1.KubeadmControlPlaneInitializationStatus{ ControlPlaneInitialized: true, }, - Deprecated: &controlplanev1.KubeadmControlPlaneDeprecatedStatus{ - V1Beta1: &controlplanev1.KubeadmControlPlaneV1Beta1DeprecatedStatus{ - Conditions: clusterv1.Conditions{ - {Type: controlplanev1.AvailableV1Beta1Condition, Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: now.Add(-5 * time.Second)}}, - }, - }, - }, Conditions: []metav1.Condition{ { Type: controlplanev1.KubeadmControlPlaneInitializedCondition, @@ -2166,7 +2158,6 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition KCP: func() *controlplanev1.KubeadmControlPlane { kcp := defaultKCP.DeepCopy() kcp.Status.Initialization = &controlplanev1.KubeadmControlPlaneInitializationStatus{ControlPlaneInitialized: false} - v1beta1conditions.MarkFalse(kcp, controlplanev1.AvailableV1Beta1Condition, "", clusterv1.ConditionSeverityError, "") conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneInitializedCondition, Status: metav1.ConditionFalse, @@ -2396,11 +2387,6 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition Cluster: defaultCluster, KCP: func() *controlplanev1.KubeadmControlPlane { kcp := defaultKCP.DeepCopy() - for i, condition := range kcp.Status.Deprecated.V1Beta1.Conditions { - if condition.Type == controlplanev1.AvailableV1Beta1Condition { - kcp.Status.Deprecated.V1Beta1.Conditions[i].LastTransitionTime.Time = now.Add(-4 * time.Minute) - } - } for i, condition := range kcp.Status.Conditions { if condition.Type == controlplanev1.KubeadmControlPlaneInitializedCondition { kcp.Status.Conditions[i].LastTransitionTime.Time = now.Add(-4 * time.Minute) @@ -2484,11 +2470,6 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition Cluster: defaultCluster, KCP: func() *controlplanev1.KubeadmControlPlane { kcp := defaultKCP.DeepCopy() - for i, condition := range kcp.Status.Deprecated.V1Beta1.Conditions { - if condition.Type == controlplanev1.AvailableV1Beta1Condition { - kcp.Status.Deprecated.V1Beta1.Conditions[i].LastTransitionTime.Time = now.Add(-4 * time.Minute) - } - } for i, condition := range kcp.Status.Conditions { if condition.Type == controlplanev1.KubeadmControlPlaneInitializedCondition { kcp.Status.Conditions[i].LastTransitionTime.Time = now.Add(-4 * time.Minute) @@ -2573,11 +2554,6 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition Cluster: defaultCluster, KCP: func() *controlplanev1.KubeadmControlPlane { kcp := defaultKCP.DeepCopy() - for i, condition := range kcp.Status.Deprecated.V1Beta1.Conditions { - if condition.Type == controlplanev1.AvailableV1Beta1Condition { - kcp.Status.Deprecated.V1Beta1.Conditions[i].LastTransitionTime.Time = now.Add(-7 * time.Minute) - } - } for i, condition := range kcp.Status.Conditions { if condition.Type == controlplanev1.KubeadmControlPlaneInitializedCondition { kcp.Status.Conditions[i].LastTransitionTime.Time = now.Add(-7 * time.Minute) 
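The recurring change across these patches is the move from the deprecated v1beta1 condition helpers to the metav1.Condition based utilities in sigs.k8s.io/cluster-api/util/conditions. A minimal sketch of the read/write pattern used in the hunks above; the function names and the Reason value are illustrative only, not part of the API:

    package sketch

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2"
        "sigs.k8s.io/cluster-api/util/conditions"
    )

    // isInitialized mirrors the read pattern: conditions.Get returns a
    // *metav1.Condition, or nil if the condition has not been set yet.
    func isInitialized(kcp *controlplanev1.KubeadmControlPlane) bool {
        c := conditions.Get(kcp, controlplanev1.KubeadmControlPlaneInitializedCondition)
        return c != nil && c.Status == metav1.ConditionTrue
    }

    // markInitialized mirrors the write pattern: conditions.Set adds or
    // updates a metav1.Condition on the object.
    func markInitialized(kcp *controlplanev1.KubeadmControlPlane) {
        conditions.Set(kcp, metav1.Condition{
            Type:   controlplanev1.KubeadmControlPlaneInitializedCondition,
            Status: metav1.ConditionTrue,
            Reason: "Initialized", // illustrative value; the real reason constants live in the controlplanev1 package
        })
    }
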
From c639a9aba9a7f1f68b6b033af2f12c4f30bf947c Mon Sep 17 00:00:00 2001
From: fabriziopandini
Date: Tue, 22 Apr 2025 21:48:13 +0200
Subject: [PATCH 16/20] Stop using controlplanev1.EtcdClusterHealthyV1Beta1Condition and controlplanev1.ControlPlaneComponentsHealthyV1Beta1Condition in controllers

---
 controlplane/kubeadm/internal/controllers/controller.go | 6 ++----
 .../kubeadm/internal/controllers/controller_test.go     | 5 ++---
 2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go
index 5926a0859a7c..7251110eebda 100644
--- a/controlplane/kubeadm/internal/controllers/controller.go
+++ b/controlplane/kubeadm/internal/controllers/controller.go
@@ -250,8 +250,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
 			// status without waiting for a full resync (by default 10 minutes).
 			// Otherwise this condition can lead to a delay in provisioning MachineDeployments when MachineSet preflight checks are enabled.
 			// The alternative solution to this requeue would be watching the relevant pods inside each workload cluster which would be very expensive.
-			// TODO (v1beta2): test for v1beta2 conditions
-			if v1beta1conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyV1Beta1Condition) {
+			if conditions.IsFalse(kcp, controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyCondition) {
 				res = ctrl.Result{RequeueAfter: 20 * time.Second}
 			}
 		}
@@ -1101,8 +1100,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context
 	// Potential inconsistencies between the list of members and the list of machines/nodes are
 	// surfaced using the EtcdClusterHealthyCondition; if this condition is true, meaning no inconsistencies exists, return early.
- // TODO (v1beta2): test for v1beta2 conditions - if v1beta1conditions.IsTrue(controlPlane.KCP, controlplanev1.EtcdClusterHealthyV1Beta1Condition) { + if conditions.IsTrue(controlPlane.KCP, controlplanev1.KubeadmControlPlaneEtcdClusterHealthyCondition) { return nil } diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index d8425341ceeb..b064998f81da 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -3828,9 +3828,8 @@ func createClusterWithControlPlane(namespace string) (*clusterv1.Cluster, *contr } func setKCPHealthy(kcp *controlplanev1.KubeadmControlPlane) { - // TODO (v1beta2):use v1beta2 conditions - v1beta1conditions.MarkTrue(kcp, controlplanev1.ControlPlaneComponentsHealthyV1Beta1Condition) - v1beta1conditions.MarkTrue(kcp, controlplanev1.EtcdClusterHealthyV1Beta1Condition) + conditions.Set(kcp, metav1.Condition{Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyCondition, Status: metav1.ConditionTrue}) + conditions.Set(kcp, metav1.Condition{Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyCondition, Status: metav1.ConditionTrue}) } func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, ready bool) (*clusterv1.Machine, *corev1.Node) { From 15c42b8984b18ac9309ca5fc2784554a4adb6f07 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Wed, 23 Apr 2025 13:36:20 +0200 Subject: [PATCH 17/20] Drop unnecessary TODO --- util/conditions/deprecated/v1beta1/patch_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/util/conditions/deprecated/v1beta1/patch_test.go b/util/conditions/deprecated/v1beta1/patch_test.go index 0e5d17aeb403..088aec4f81cd 100644 --- a/util/conditions/deprecated/v1beta1/patch_test.go +++ b/util/conditions/deprecated/v1beta1/patch_test.go @@ -318,7 +318,6 @@ func TestApplyDoesNotAlterLastTransitionTime(t *testing.T) { before := &clusterv1.Cluster{} after := &clusterv1.Cluster{ Status: clusterv1.ClusterStatus{ - // TODO (v1beta2) Use new conditions Deprecated: &clusterv1.ClusterDeprecatedStatus{ V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ Conditions: clusterv1.Conditions{ From 7c0d63c8e8a7fdcf66566db5f9283efe98ae5bdd Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Wed, 23 Apr 2025 14:23:41 +0200 Subject: [PATCH 18/20] Stop using FailureReason and FailureMessage in controllers --- api/v1beta2/cluster_phase_types.go | 3 + api/v1beta2/machine_phase_types.go | 3 + api/v1beta2/machinedeployment_types.go | 3 + exp/api/v1beta2/machinepool_types.go | 3 + .../machinepool_controller_phases.go | 5 -- .../machinepool_controller_phases_test.go | 65 ------------------- .../cluster/cluster_controller_phases.go | 13 +--- .../cluster/cluster_controller_phases_test.go | 45 ------------- .../machine/machine_controller_status.go | 5 -- .../machinedeployment_sync.go | 8 --- .../machinedeployment_sync_test.go | 59 ----------------- .../machinehealthcheck_controller_test.go | 39 +---------- .../machinehealthcheck_targets.go | 12 ---- .../machinehealthcheck_targets_test.go | 49 -------------- .../machineset/machineset_delete_policy.go | 3 - .../machineset_delete_policy_test.go | 55 ++++++++-------- 16 files changed, 43 insertions(+), 327 deletions(-) diff --git a/api/v1beta2/cluster_phase_types.go b/api/v1beta2/cluster_phase_types.go index 89a3dbbc240c..3396d74dbad6 100644 --- a/api/v1beta2/cluster_phase_types.go +++ 
b/api/v1beta2/cluster_phase_types.go @@ -49,6 +49,9 @@ const ( // ClusterPhaseFailed is the Cluster state when the system // might require user intervention. + // + // Deprecated: This phase is deprecated and is going to be removed when support for v1beta1 will be dropped. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. + // ClusterPhaseFailed = ClusterPhase("Failed") // ClusterPhaseUnknown is returned if the Cluster state cannot be determined. diff --git a/api/v1beta2/machine_phase_types.go b/api/v1beta2/machine_phase_types.go index 4467586c30d5..47ab61528b3d 100644 --- a/api/v1beta2/machine_phase_types.go +++ b/api/v1beta2/machine_phase_types.go @@ -57,6 +57,9 @@ const ( // MachinePhaseFailed is the Machine state when the system // might require user intervention. + // + // Deprecated: This phase is deprecated and is going to be removed when support for v1beta1 will be dropped. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. + // MachinePhaseFailed = MachinePhase("Failed") // MachinePhaseUnknown is returned if the Machine state cannot be determined. diff --git a/api/v1beta2/machinedeployment_types.go b/api/v1beta2/machinedeployment_types.go index f9791be8fd41..ad7b3df4aab1 100644 --- a/api/v1beta2/machinedeployment_types.go +++ b/api/v1beta2/machinedeployment_types.go @@ -567,6 +567,9 @@ const ( MachineDeploymentPhaseRunning = MachineDeploymentPhase("Running") // MachineDeploymentPhaseFailed indicates there was a problem scaling and user intervention might be required. + // + // Deprecated: This phase is deprecated and is going to be removed when support for v1beta1 will be dropped. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. + // MachineDeploymentPhaseFailed = MachineDeploymentPhase("Failed") // MachineDeploymentPhaseUnknown indicates the state of the MachineDeployment cannot be determined. diff --git a/exp/api/v1beta2/machinepool_types.go b/exp/api/v1beta2/machinepool_types.go index 735fba745d34..742b64f3b754 100644 --- a/exp/api/v1beta2/machinepool_types.go +++ b/exp/api/v1beta2/machinepool_types.go @@ -255,6 +255,9 @@ const ( // MachinePoolPhaseFailed is the MachinePool state when the system // might require user intervention. + // + // Deprecated: This phase is deprecated and is going to be removed when support for v1beta1 will be dropped. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. + // MachinePoolPhaseFailed = MachinePoolPhase("Failed") // MachinePoolPhaseUnknown is returned if the MachinePool state cannot be determined. diff --git a/exp/internal/controllers/machinepool_controller_phases.go b/exp/internal/controllers/machinepool_controller_phases.go index 26279c7ce8a1..e7d5d7a202a0 100644 --- a/exp/internal/controllers/machinepool_controller_phases.go +++ b/exp/internal/controllers/machinepool_controller_phases.go @@ -101,11 +101,6 @@ func (r *MachinePoolReconciler) reconcilePhase(mp *expv1.MachinePool) { } } - // Set the phase to "failed" if any of Status.FailureReason or Status.FailureMessage is not-nil. 
- if mp.Status.Deprecated != nil && mp.Status.Deprecated.V1Beta1 != nil && (mp.Status.Deprecated.V1Beta1.FailureReason != nil || mp.Status.Deprecated.V1Beta1.FailureMessage != nil) { - mp.Status.SetTypedPhase(expv1.MachinePoolPhaseFailed) - } - // Set the phase to "deleting" if the deletion timestamp is set. if !mp.DeletionTimestamp.IsZero() { mp.Status.SetTypedPhase(expv1.MachinePoolPhaseDeleting) diff --git a/exp/internal/controllers/machinepool_controller_phases_test.go b/exp/internal/controllers/machinepool_controller_phases_test.go index a98fcbe09e90..077c6ab108a6 100644 --- a/exp/internal/controllers/machinepool_controller_phases_test.go +++ b/exp/internal/controllers/machinepool_controller_phases_test.go @@ -1218,71 +1218,6 @@ func TestReconcileMachinePoolInfrastructure(t *testing.T) { g.Expect(m.Status.Initialization != nil && m.Status.Initialization.InfrastructureProvisioned).To(BeTrue()) }, }, - { - name: "ready bootstrap, infra, and nodeRef, machinepool is running, infra object is deleted, expect failed", - machinepool: &expv1.MachinePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "machinepool-test", - Namespace: metav1.NamespaceDefault, - }, - Spec: expv1.MachinePoolSpec{ - Replicas: ptr.To[int32](1), - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - APIVersion: builder.BootstrapGroupVersion.String(), - Kind: builder.TestBootstrapConfigKind, - Name: "bootstrap-config1", - Namespace: metav1.NamespaceDefault, - }, - }, - InfrastructureRef: corev1.ObjectReference{ - APIVersion: builder.InfrastructureGroupVersion.String(), - Kind: builder.TestInfrastructureMachineTemplateKind, - Name: "infra-config1", - Namespace: metav1.NamespaceDefault, - }, - }, - }, - }, - Status: expv1.MachinePoolStatus{ - Initialization: &expv1.MachinePoolInitializationStatus{ - InfrastructureProvisioned: true, - BootstrapDataSecretCreated: true, - }, - NodeRefs: []corev1.ObjectReference{{Kind: "Node", Name: "machinepool-test-node"}}, - }, - }, - bootstrapConfig: map[string]interface{}{ - "kind": builder.TestBootstrapConfigKind, - "apiVersion": builder.BootstrapGroupVersion.String(), - "metadata": map[string]interface{}{ - "name": "bootstrap-config1", - "namespace": metav1.NamespaceDefault, - }, - "spec": map[string]interface{}{}, - "status": map[string]interface{}{ - "initialization": map[string]interface{}{ - "dataSecretCreated": true, - }, - "dataSecretName": "secret-data", - }, - }, - infraConfig: map[string]interface{}{ - "kind": builder.TestInfrastructureMachineTemplateKind, - "apiVersion": builder.InfrastructureGroupVersion.String(), - "metadata": map[string]interface{}{}, - }, - expectError: true, - expectRequeueAfter: false, - expected: func(g *WithT, m *expv1.MachinePool) { - g.Expect(m.Status.Initialization != nil && m.Status.Initialization.InfrastructureProvisioned).To(BeTrue()) - g.Expect(m.Status.Deprecated.V1Beta1.FailureMessage).ToNot(BeNil()) - g.Expect(m.Status.Deprecated.V1Beta1.FailureReason).ToNot(BeNil()) - g.Expect(m.Status.GetTypedPhase()).To(Equal(expv1.MachinePoolPhaseFailed)) - }, - }, { name: "ready bootstrap, infra, and nodeRef, machinepool is running, replicas 0, providerIDList not set", machinepool: &expv1.MachinePool{ diff --git a/internal/controllers/cluster/cluster_controller_phases.go b/internal/controllers/cluster/cluster_controller_phases.go index 3344935a5f4e..c6df98f219dd 100644 --- a/internal/controllers/cluster/cluster_controller_phases.go +++ 
b/internal/controllers/cluster/cluster_controller_phases.go @@ -64,24 +64,13 @@ func (r *Reconciler) reconcilePhase(_ context.Context, cluster *clusterv1.Cluste cluster.Status.SetTypedPhase(clusterv1.ClusterPhaseProvisioned) } - failureMessage := "" - if cluster.Status.Deprecated != nil && cluster.Status.Deprecated.V1Beta1 != nil && (cluster.Status.Deprecated.V1Beta1.FailureReason != nil || cluster.Status.Deprecated.V1Beta1.FailureMessage != nil) { - cluster.Status.SetTypedPhase(clusterv1.ClusterPhaseFailed) - failureMessage = ptr.Deref(cluster.Status.Deprecated.V1Beta1.FailureMessage, "unknown") - } - if !cluster.DeletionTimestamp.IsZero() { cluster.Status.SetTypedPhase(clusterv1.ClusterPhaseDeleting) } // Only record the event if the status has changed if preReconcilePhase != cluster.Status.GetTypedPhase() { - // Failed clusters should get a Warning event - if cluster.Status.GetTypedPhase() == clusterv1.ClusterPhaseFailed { - r.recorder.Eventf(cluster, corev1.EventTypeWarning, string(cluster.Status.GetTypedPhase()), "Cluster %s is %s: %s", cluster.Name, string(cluster.Status.GetTypedPhase()), failureMessage) - } else { - r.recorder.Eventf(cluster, corev1.EventTypeNormal, string(cluster.Status.GetTypedPhase()), "Cluster %s is %s", cluster.Name, string(cluster.Status.GetTypedPhase())) - } + r.recorder.Eventf(cluster, corev1.EventTypeNormal, string(cluster.Status.GetTypedPhase()), "Cluster %s is %s", cluster.Name, string(cluster.Status.GetTypedPhase())) } } diff --git a/internal/controllers/cluster/cluster_controller_phases_test.go b/internal/controllers/cluster/cluster_controller_phases_test.go index 5b2ad8e1120b..93848c1dece8 100644 --- a/internal/controllers/cluster/cluster_controller_phases_test.go +++ b/internal/controllers/cluster/cluster_controller_phases_test.go @@ -36,7 +36,6 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" externalfake "sigs.k8s.io/cluster-api/controllers/external/fake" - capierrors "sigs.k8s.io/cluster-api/errors" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/test/builder" ) @@ -673,8 +672,6 @@ func TestClusterReconciler_reconcilePhase(t *testing.T) { Status: clusterv1.ClusterStatus{}, Spec: clusterv1.ClusterSpec{}, } - createClusterError := capierrors.CreateClusterError - failureMsg := "Create failed" tests := []struct { name string @@ -756,48 +753,6 @@ func TestClusterReconciler_reconcilePhase(t *testing.T) { wantPhase: clusterv1.ClusterPhaseProvisioned, }, - { - name: "cluster status has FailureReason", - cluster: &clusterv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - }, - Status: clusterv1.ClusterStatus{ - Initialization: &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true}, - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - FailureReason: &createClusterError, - }, - }, - }, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{}, - }, - }, - - wantPhase: clusterv1.ClusterPhaseFailed, - }, - { - name: "cluster status has FailureMessage", - cluster: &clusterv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - }, - Status: clusterv1.ClusterStatus{ - Initialization: &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true}, - Deprecated: &clusterv1.ClusterDeprecatedStatus{ - V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ - FailureMessage: &failureMsg, - }, - }, - }, - Spec: 
clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{}, - }, - }, - - wantPhase: clusterv1.ClusterPhaseFailed, - }, { name: "cluster has deletion timestamp", cluster: &clusterv1.Cluster{ diff --git a/internal/controllers/machine/machine_controller_status.go b/internal/controllers/machine/machine_controller_status.go index a651c5ff7f52..0074e52cdc76 100644 --- a/internal/controllers/machine/machine_controller_status.go +++ b/internal/controllers/machine/machine_controller_status.go @@ -804,11 +804,6 @@ func setMachinePhaseAndLastUpdated(_ context.Context, m *clusterv1.Machine) { m.Status.SetTypedPhase(clusterv1.MachinePhaseRunning) } - // Set the phase to "failed" if any of Status.FailureReason or Status.FailureMessage is not-nil. - if m.Status.Deprecated != nil && m.Status.Deprecated.V1Beta1 != nil && (m.Status.Deprecated.V1Beta1.FailureReason != nil || m.Status.Deprecated.V1Beta1.FailureMessage != nil) { - m.Status.SetTypedPhase(clusterv1.MachinePhaseFailed) - } - // Set the phase to "deleting" if the deletion timestamp is set. if !m.DeletionTimestamp.IsZero() { m.Status.SetTypedPhase(clusterv1.MachinePhaseDeleting) diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index c6bebe9d12f7..eab4e14dfc5d 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -563,14 +563,6 @@ func calculateStatus(allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet if totalReplicas-availableReplicas < 0 { status.Phase = string(clusterv1.MachineDeploymentPhaseScalingDown) } - for _, ms := range allMSs { - if ms != nil { - if ms.Status.Deprecated != nil && ms.Status.Deprecated.V1Beta1 != nil && (ms.Status.Deprecated.V1Beta1.FailureReason != nil || ms.Status.Deprecated.V1Beta1.FailureMessage != nil) { - status.Phase = string(clusterv1.MachineDeploymentPhaseFailed) - break - } - } - } return status } diff --git a/internal/controllers/machinedeployment/machinedeployment_sync_test.go b/internal/controllers/machinedeployment/machinedeployment_sync_test.go index 78ba734aaed1..e91623d13509 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync_test.go @@ -35,14 +35,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) func TestCalculateStatus(t *testing.T) { - msStatusError := capierrors.MachineSetStatusError("some failure") - var tests = map[string]struct { machineSets []*clusterv1.MachineSet newMachineSet *clusterv1.MachineSet @@ -214,62 +211,6 @@ func TestCalculateStatus(t *testing.T) { Phase: "ScalingDown", }, }, - "MachineSet failed": { - machineSets: []*clusterv1.MachineSet{{ - Spec: clusterv1.MachineSetSpec{ - Replicas: ptr.To[int32](2), - }, - Status: clusterv1.MachineSetStatus{ - Selector: "", - Replicas: 2, - Deprecated: &clusterv1.MachineSetDeprecatedStatus{ - V1Beta1: &clusterv1.MachineSetV1Beta1DeprecatedStatus{ - AvailableReplicas: 0, - ReadyReplicas: 0, - FailureReason: &msStatusError, - }, - }, - ObservedGeneration: 1, - }, - }}, - newMachineSet: &clusterv1.MachineSet{ - Spec: clusterv1.MachineSetSpec{ - Replicas: ptr.To[int32](2), - }, - Status: 
clusterv1.MachineSetStatus{ - Selector: "", - Replicas: 2, - Deprecated: &clusterv1.MachineSetDeprecatedStatus{ - V1Beta1: &clusterv1.MachineSetV1Beta1DeprecatedStatus{ - AvailableReplicas: 0, - ReadyReplicas: 0, - }, - }, - ObservedGeneration: 1, - }, - }, - deployment: &clusterv1.MachineDeployment{ - ObjectMeta: metav1.ObjectMeta{ - Generation: 2, - }, - Spec: clusterv1.MachineDeploymentSpec{ - Replicas: ptr.To[int32](2), - }, - }, - expectedStatus: clusterv1.MachineDeploymentStatus{ - ObservedGeneration: 2, - Replicas: 2, - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - UpdatedReplicas: 2, - ReadyReplicas: 0, - AvailableReplicas: 0, - UnavailableReplicas: 2, - }, - }, - Phase: "Failed", - }, - }, } for name, test := range tests { diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index 0dc94a5c1e97..e1b15e2b50b4 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -44,7 +44,6 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" "sigs.k8s.io/cluster-api/api/v1beta2/index" "sigs.k8s.io/cluster-api/controllers/clustercache" - capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/internal/webhooks" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" @@ -504,7 +503,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { createNodeRefForMachine(true), nodeStatus(corev1.ConditionTrue), machineLabels(mhc.Spec.Selector.MatchLabels), - machineFailureReason("some failure"), + machineAnnotations(map[string]string{clusterv1.RemediateMachineAnnotation: ""}), ) defer cleanup2() machines = append(machines, unhealthyMachines...) @@ -614,7 +613,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { createNodeRefForMachine(true), nodeStatus(corev1.ConditionTrue), machineLabels(mhc.Spec.Selector.MatchLabels), - machineFailureMessage("some failure"), + machineAnnotations(map[string]string{clusterv1.RemediateMachineAnnotation: ""}), ) defer cleanup2() machines = append(machines, unhealthyMachines...) 
@@ -2534,8 +2533,6 @@ type machinesWithNodes struct { firstMachineAsControlPlane bool annotations map[string]string labels map[string]string - failureReason string - failureMessage string finalizers []string } @@ -2571,18 +2568,6 @@ func machineLabels(l map[string]string) machineWithNodesOption { } } -func machineFailureReason(s string) machineWithNodesOption { - return func(m *machinesWithNodes) { - m.failureReason = s - } -} - -func machineFailureMessage(s string) machineWithNodesOption { - return func(m *machinesWithNodes) { - m.failureMessage = s - } -} - func machineAnnotations(a map[string]string) machineWithNodesOption { return func(m *machinesWithNodes) { m.annotations = a @@ -2695,26 +2680,6 @@ func createMachinesWithNodes( } } - if o.failureReason != "" { - failureReason := capierrors.MachineStatusError(o.failureReason) - if machine.Status.Deprecated == nil { - machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{} - } - if machine.Status.Deprecated.V1Beta1 == nil { - machine.Status.Deprecated.V1Beta1 = &clusterv1.MachineV1Beta1DeprecatedStatus{} - } - machine.Status.Deprecated.V1Beta1.FailureReason = &failureReason - } - if o.failureMessage != "" { - if machine.Status.Deprecated == nil { - machine.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{} - } - if machine.Status.Deprecated.V1Beta1 == nil { - machine.Status.Deprecated.V1Beta1 = &clusterv1.MachineV1Beta1DeprecatedStatus{} - } - machine.Status.Deprecated.V1Beta1.FailureMessage = ptr.To(o.failureMessage) - } - // Adding one second to ensure there is a difference from the // original time so that the patch works. That is, ensure the // precision isn't lost during conversions. diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go index 3dfad3afc610..a5d5fc2f392d 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go @@ -99,18 +99,6 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi return true, time.Duration(0) } - if t.Machine.Status.Deprecated != nil && t.Machine.Status.Deprecated.V1Beta1 != nil && t.Machine.Status.Deprecated.V1Beta1.FailureReason != nil { - v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededV1Beta1Condition, clusterv1.MachineHasFailureV1Beta1Reason, clusterv1.ConditionSeverityWarning, "FailureReason: %v", *t.Machine.Status.Deprecated.V1Beta1.FailureReason) - logger.V(3).Info("Target is unhealthy", "failureReason", t.Machine.Status.Deprecated.V1Beta1.FailureReason) - return true, time.Duration(0) - } - - if t.Machine.Status.Deprecated != nil && t.Machine.Status.Deprecated.V1Beta1 != nil && t.Machine.Status.Deprecated.V1Beta1.FailureMessage != nil { - v1beta1conditions.MarkFalse(t.Machine, clusterv1.MachineHealthCheckSucceededV1Beta1Condition, clusterv1.MachineHasFailureV1Beta1Reason, clusterv1.ConditionSeverityWarning, "FailureMessage: %v", *t.Machine.Status.Deprecated.V1Beta1.FailureMessage) - logger.V(3).Info("Target is unhealthy", "failureMessage", t.Machine.Status.Deprecated.V1Beta1.FailureMessage) - return true, time.Duration(0) - } - // Machine has Status.NodeRef set, although we couldn't find the node in the workload cluster. 
if t.nodeMissing { logger.V(3).Info("Target is unhealthy: node is missing") diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go index 51f1e563a042..fd5bd9d5bff5 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go @@ -30,7 +30,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util/conditions" v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" "sigs.k8s.io/cluster-api/util/patch" @@ -379,38 +378,6 @@ func TestHealthCheckTargets(t *testing.T) { nodeMissing: false, } - // Target for when the machine has a failure reason - failureReason := errors.UpdateMachineError - testMachineFailureReason := testMachine.DeepCopy() - testMachineFailureReason.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - FailureReason: &failureReason, - }, - } - machineFailureReason := healthCheckTarget{ - Cluster: cluster, - MHC: testMHC, - Machine: testMachineFailureReason, - Node: nil, - } - machineFailureReasonCondition := newFailedHealthCheckV1Beta1Condition(clusterv1.MachineHasFailureV1Beta1Reason, "FailureReason: %s", failureReason) - - // Target for when the machine has a failure message - failureMsg := "some failure message" - testMachineFailureMsg := testMachine.DeepCopy() - testMachineFailureMsg.Status.Deprecated = &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - FailureMessage: &failureMsg, - }, - } - machineFailureMsg := healthCheckTarget{ - Cluster: cluster, - MHC: testMHC, - Machine: testMachineFailureMsg, - Node: nil, - } - machineFailureMsgCondition := newFailedHealthCheckV1Beta1Condition(clusterv1.MachineHasFailureV1Beta1Reason, "FailureMessage: %s", failureMsg) - // Target for when the machine has the remediate machine annotation const annotationRemediationMsg = "Marked for remediation via remediate-machine annotation" const annotationRemediationV1Beta2Msg = "Health check failed: marked for remediation via cluster.x-k8s.io/remediate-machine annotation" @@ -507,22 +474,6 @@ func TestHealthCheckTargets(t *testing.T) { expectedNeedsRemediation: []healthCheckTarget{}, expectedNextCheckTimes: []time.Duration{}, // We don't have a timeout so no way to know when to re-check }, - { - desc: "when the machine has a failure reason", - targets: []healthCheckTarget{machineFailureReason}, - expectedHealthy: []healthCheckTarget{}, - expectedNeedsRemediation: []healthCheckTarget{machineFailureReason}, - expectedNeedsRemediationCondition: []clusterv1.Condition{machineFailureReasonCondition}, - expectedNextCheckTimes: []time.Duration{}, - }, - { - desc: "when the machine has a failure message", - targets: []healthCheckTarget{machineFailureMsg}, - expectedHealthy: []healthCheckTarget{}, - expectedNeedsRemediation: []healthCheckTarget{machineFailureMsg}, - expectedNeedsRemediationCondition: []clusterv1.Condition{machineFailureMsgCondition}, - expectedNextCheckTimes: []time.Duration{}, - }, { desc: "when the machine is manually marked for remediation", targets: []healthCheckTarget{machineAnnotationRemediation}, diff --git a/internal/controllers/machineset/machineset_delete_policy.go b/internal/controllers/machineset/machineset_delete_policy.go index 
adcb92c1d4f7..b099ca4d776d 100644 --- a/internal/controllers/machineset/machineset_delete_policy.go +++ b/internal/controllers/machineset/machineset_delete_policy.go @@ -142,9 +142,6 @@ func isMachineHealthy(machine *clusterv1.Machine) bool { if machine.Status.NodeRef == nil { return false } - if machine.Status.Deprecated != nil && machine.Status.Deprecated.V1Beta1 != nil && (machine.Status.Deprecated.V1Beta1.FailureReason != nil || machine.Status.Deprecated.V1Beta1.FailureMessage != nil) { - return false - } // Note: for the sake of prioritization, we are not making any assumption about Health when ConditionUnknown. if conditions.IsFalse(machine, clusterv1.MachineNodeReadyCondition) { return false diff --git a/internal/controllers/machineset/machineset_delete_policy_test.go b/internal/controllers/machineset/machineset_delete_policy_test.go index 7ce766b5a967..616bef335538 100644 --- a/internal/controllers/machineset/machineset_delete_policy_test.go +++ b/internal/controllers/machineset/machineset_delete_policy_test.go @@ -26,11 +26,9 @@ import ( "k8s.io/apimachinery/pkg/util/rand" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta2" - capierrors "sigs.k8s.io/cluster-api/errors" ) func TestMachineToDelete(t *testing.T) { - msg := "something wrong with the machine" now := metav1.Now() nodeRef := &corev1.ObjectReference{Name: "some-node"} healthyMachine := &clusterv1.Machine{Status: clusterv1.MachineStatus{NodeRef: nodeRef}} @@ -40,9 +38,10 @@ func TestMachineToDelete(t *testing.T) { } betterDeleteMachine := &clusterv1.Machine{ Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - FailureMessage: &msg, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionFalse, }, }, NodeRef: nodeRef, @@ -289,7 +288,6 @@ func TestMachineToDelete(t *testing.T) { func TestMachineNewestDelete(t *testing.T) { currentTime := metav1.Now() - statusError := capierrors.MachineStatusError("I'm unhealthy!") nodeRef := &corev1.ObjectReference{Name: "some-node"} mustDeleteMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: ¤tTime}, @@ -318,9 +316,10 @@ func TestMachineNewestDelete(t *testing.T) { unhealthyMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - FailureReason: &statusError, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionFalse, }, }, NodeRef: nodeRef, @@ -439,7 +438,6 @@ func TestMachineNewestDelete(t *testing.T) { func TestMachineOldestDelete(t *testing.T) { currentTime := metav1.Now() - statusError := capierrors.MachineStatusError("I'm unhealthy!") nodeRef := &corev1.ObjectReference{Name: "some-node"} empty := &clusterv1.Machine{ Status: clusterv1.MachineStatus{NodeRef: nodeRef}, @@ -467,9 +465,10 @@ func TestMachineOldestDelete(t *testing.T) { unhealthyMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - FailureReason: &statusError, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionFalse, }, }, 
NodeRef: nodeRef, @@ -482,9 +481,10 @@ func TestMachineOldestDelete(t *testing.T) { unhealthyMachineA := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "a", CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - FailureReason: &statusError, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionFalse, }, }, NodeRef: nodeRef, @@ -493,9 +493,10 @@ func TestMachineOldestDelete(t *testing.T) { unhealthyMachineZ := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "z", CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - FailureReason: &statusError, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionFalse, }, }, NodeRef: nodeRef, @@ -731,8 +732,6 @@ func TestMachineDeleteMultipleSamePriority(t *testing.T) { func TestIsMachineHealthy(t *testing.T) { nodeRef := &corev1.ObjectReference{Name: "some-node"} - statusError := capierrors.MachineStatusError("I'm unhealthy!") - msg := "something wrong with the machine" tests := []struct { desc string @@ -748,9 +747,10 @@ func TestIsMachineHealthy(t *testing.T) { desc: "when it has a FailureReason", machine: &clusterv1.Machine{ Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - FailureReason: &statusError, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionFalse, }, }, NodeRef: nodeRef, @@ -762,9 +762,10 @@ func TestIsMachineHealthy(t *testing.T) { desc: "when it has a FailureMessage", machine: &clusterv1.Machine{ Status: clusterv1.MachineStatus{ - Deprecated: &clusterv1.MachineDeprecatedStatus{ - V1Beta1: &clusterv1.MachineV1Beta1DeprecatedStatus{ - FailureMessage: &msg, + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineNodeReadyCondition, + Status: metav1.ConditionFalse, }, }, NodeRef: nodeRef, From 59af9c5bf4010e3e1cd3697ad6eb85032d8c2d44 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Wed, 23 Apr 2025 15:17:08 +0200 Subject: [PATCH 19/20] Small cleanups --- .../internal/controllers/controller.go | 4 +- .../kubeadm/internal/controllers/status.go | 9 ++-- .../internal/controllers/status_test.go | 10 ++--- .../cluster/cluster_controller_status.go | 42 +++++++++---------- .../machine/machine_controller_status.go | 13 +++--- .../machine/machine_controller_status_test.go | 2 +- 6 files changed, 39 insertions(+), 41 deletions(-) diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 7251110eebda..f918cd960f41 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -214,7 +214,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. defer func() { // Always attempt to update status. 
- if err := r.updateStatus(ctx, controlPlane); err != nil { + if err := r.updateV1Beta1Status(ctx, controlPlane); err != nil { var connFailure *internal.RemoteClusterConnectionError if errors.As(err, &connFailure) { log.Error(err, "Could not connect to workload cluster to fetch status") @@ -224,7 +224,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. } } - r.updateV1Beta2Status(ctx, controlPlane) + r.updateStatus(ctx, controlPlane) // Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation. patchOpts := []patch.Option{} diff --git a/controlplane/kubeadm/internal/controllers/status.go b/controlplane/kubeadm/internal/controllers/status.go index 878d77ca9c3d..d7ebb395fdf4 100644 --- a/controlplane/kubeadm/internal/controllers/status.go +++ b/controlplane/kubeadm/internal/controllers/status.go @@ -39,9 +39,9 @@ import ( clog "sigs.k8s.io/cluster-api/util/log" ) -// updateStatus is called after every reconciliation loop in a defer statement to always make sure we have the +// updateV1Beta1Status is called after every reconciliation loop in a defer statement to always make sure we have the // KubeadmControlPlane status up-to-date. -func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, controlPlane *internal.ControlPlane) error { +func (r *KubeadmControlPlaneReconciler) updateV1Beta1Status(ctx context.Context, controlPlane *internal.ControlPlane) error { selector := collections.ControlPlaneSelectorForCluster(controlPlane.Cluster.Name) // Copy label selector to its status counterpart in string format. // This is necessary for CRDs including scale subresources. @@ -149,9 +149,8 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, contro return nil } -// updateV1Beta2Status reconciles KubeadmControlPlane's status during the entire lifecycle of the object. -// Note: v1beta1 conditions and fields are not managed by this func. -func (r *KubeadmControlPlaneReconciler) updateV1Beta2Status(ctx context.Context, controlPlane *internal.ControlPlane) { +// updateStatus reconciles KubeadmControlPlane's status during the entire lifecycle of the object. +func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, controlPlane *internal.ControlPlane) { // If the code failed initializing the control plane, do not update the status. 
if controlPlane == nil { return diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index 7dba0f58d946..75828a2c3cdb 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -1927,7 +1927,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) { } controlPlane.InjectTestManagementCluster(r.managementCluster) - g.Expect(r.updateStatus(ctx, controlPlane)).To(Succeed()) + g.Expect(r.updateV1Beta1Status(ctx, controlPlane)).To(Succeed()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(0)) g.Expect(kcp.Status.Deprecated.V1Beta1.ReadyReplicas).To(BeEquivalentTo(0)) g.Expect(kcp.Status.Deprecated.V1Beta1.UnavailableReplicas).To(BeEquivalentTo(0)) @@ -1999,7 +1999,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin } controlPlane.InjectTestManagementCluster(r.managementCluster) - g.Expect(r.updateStatus(ctx, controlPlane)).To(Succeed()) + g.Expect(r.updateV1Beta1Status(ctx, controlPlane)).To(Succeed()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(3)) g.Expect(kcp.Status.Deprecated.V1Beta1.ReadyReplicas).To(BeEquivalentTo(0)) g.Expect(kcp.Status.Deprecated.V1Beta1.UnavailableReplicas).To(BeEquivalentTo(3)) @@ -2077,7 +2077,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T } controlPlane.InjectTestManagementCluster(r.managementCluster) - g.Expect(r.updateStatus(ctx, controlPlane)).To(Succeed()) + g.Expect(r.updateV1Beta1Status(ctx, controlPlane)).To(Succeed()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(3)) g.Expect(kcp.Status.Deprecated.V1Beta1.ReadyReplicas).To(BeEquivalentTo(3)) g.Expect(kcp.Status.Deprecated.V1Beta1.UnavailableReplicas).To(BeEquivalentTo(0)) @@ -2159,7 +2159,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing } controlPlane.InjectTestManagementCluster(r.managementCluster) - g.Expect(r.updateStatus(ctx, controlPlane)).To(Succeed()) + g.Expect(r.updateV1Beta1Status(ctx, controlPlane)).To(Succeed()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(5)) g.Expect(kcp.Status.Deprecated.V1Beta1.ReadyReplicas).To(BeEquivalentTo(1)) g.Expect(kcp.Status.Deprecated.V1Beta1.UnavailableReplicas).To(BeEquivalentTo(4)) @@ -2240,7 +2240,7 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr } controlPlane.InjectTestManagementCluster(r.managementCluster) - g.Expect(r.updateStatus(ctx, controlPlane)).To(Succeed()) + g.Expect(r.updateV1Beta1Status(ctx, controlPlane)).To(Succeed()) g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(3)) g.Expect(kcp.Status.Deprecated.V1Beta1.ReadyReplicas).To(BeEquivalentTo(0)) g.Expect(kcp.Status.Deprecated.V1Beta1.UnavailableReplicas).To(BeEquivalentTo(3)) diff --git a/internal/controllers/cluster/cluster_controller_status.go b/internal/controllers/cluster/cluster_controller_status.go index b21c6ec395c5..294ed8348e10 100644 --- a/internal/controllers/cluster/cluster_controller_status.go +++ b/internal/controllers/cluster/cluster_controller_status.go @@ -56,7 +56,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, s *scope) error { // TODO: "expv1.MachinePoolList{}" below should be replaced through "s.descendants.machinePools" once replica counters // and Available, ScalingUp and ScalingDown conditions have been implemented for MachinePools. 
- // TODO: This should be removed once the UpToDate v1beta1Condition has been implemented for MachinePool Machines + // TODO: This should be removed once the UpToDate Condition has been implemented for MachinePool Machines isMachinePoolMachine := func(machine *clusterv1.Machine) bool { _, isMachinePoolMachine := machine.Labels[clusterv1.MachinePoolNameLabel] return isMachinePoolMachine @@ -282,7 +282,7 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust return } - // In case v1beta1Condition has NoReasonReported and status true, we assume it is a v1beta1 v1beta1Condition + // In case condition has NoReasonReported and status true, we assume it is a v1beta1 condition // and replace the reason with something less confusing. if ready.Reason == conditions.NoReasonReported && ready.Status == metav1.ConditionTrue { ready.Reason = clusterv1.ClusterInfrastructureReadyReason @@ -383,7 +383,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu return } - // In case v1beta1Condition has NoReasonReported and status true, we assume it is a v1beta1 v1beta1Condition + // In case condition has NoReasonReported and status true, we assume it is a v1beta1 condition // and replace the reason with something less confusing. if available.Reason == conditions.NoReasonReported && available.Status == metav1.ConditionTrue { available.Reason = clusterv1.ClusterControlPlaneAvailableReason @@ -640,9 +640,9 @@ func setWorkerMachinesReadyCondition(ctx context.Context, cluster *clusterv1.Clu } func setControlPlaneMachinesUpToDateCondition(ctx context.Context, cluster *clusterv1.Cluster, machines collections.Machines, getDescendantsSucceeded bool) { - // Only consider Machines that have an UpToDate v1beta1Condition or are older than 10s. - // This is done to ensure the MachinesUpToDate v1beta1Condition doesn't flicker after a new Machine is created, - // because it can take a bit until the UpToDate v1beta1Condition is set on a new Machine. + // Only consider Machines that have an UpToDate condition or are older than 10s. + // This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created, + // because it can take a bit until the UpToDate condition is set on a new Machine. machines = machines.Filter(func(machine *clusterv1.Machine) bool { return conditions.Has(machine, clusterv1.MachineUpToDateCondition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second }) @@ -659,9 +659,9 @@ func setControlPlaneMachinesUpToDateCondition(ctx context.Context, cluster *clus } func setWorkerMachinesUpToDateCondition(ctx context.Context, cluster *clusterv1.Cluster, machines collections.Machines, getDescendantsSucceeded bool) { - // Only consider Machines that have an UpToDate v1beta1Condition or are older than 10s. - // This is done to ensure the MachinesUpToDate v1beta1Condition doesn't flicker after a new Machine is created, - // because it can take a bit until the UpToDate v1beta1Condition is set on a new Machine. + // Only consider Machines that have an UpToDate condition or are older than 10s. + // This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created, + // because it can take a bit until the UpToDate condition is set on a new Machine. 
machines = machines.Filter(func(machine *clusterv1.Machine) bool { return conditions.Has(machine, clusterv1.MachineUpToDateCondition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second }) @@ -764,7 +764,7 @@ func setRemediatingCondition(ctx context.Context, cluster *clusterv1.Cluster, ma machinesToBeRemediated.UnsortedList(), clusterv1.MachineOwnerRemediatedCondition, conditions.TargetConditionType(clusterv1.ClusterRemediatingCondition), // Note: in case of the remediating conditions it is not required to use a CustomMergeStrategy/ComputeReasonFunc - // because we are considering only machinesToBeRemediated (and we can pin the reason when we set the v1beta1Condition). + // because we are considering only machinesToBeRemediated (and we can pin the reason when we set the condition). ) if err != nil { conditions.Set(cluster, metav1.Condition{ @@ -803,7 +803,7 @@ func setRollingOutCondition(ctx context.Context, cluster *clusterv1.Cluster, con ws := make([]aggregationWrapper, 0, len(machinePools.Items)+len(machineDeployments.Items)+1) if controlPlane != nil { - // control plane is considered only if it is reporting the v1beta1Condition (the contract does not require conditions to be reported) + // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) // Note: this implies that it won't surface as "Conditions RollingOut not yet reported from ...". if c, err := conditions.UnstructuredGet(controlPlane, clusterv1.RollingOutCondition); err == nil && c != nil { ws = append(ws, aggregationWrapper{cp: controlPlane}) @@ -828,7 +828,7 @@ func setRollingOutCondition(ctx context.Context, cluster *clusterv1.Cluster, con rollingOutCondition, err := conditions.NewAggregateCondition( ws, clusterv1.RollingOutCondition, conditions.TargetConditionType(clusterv1.ClusterRollingOutCondition), - // Instruct aggregate to consider RollingOut v1beta1Condition with negative polarity. + // Instruct aggregate to consider RollingOut condition with negative polarity. conditions.NegativePolarityConditionTypes{clusterv1.RollingOutCondition}, // Using a custom merge strategy to override reasons applied during merge and to ensure merge // takes into account the fact the RollingOut has negative polarity. @@ -874,7 +874,7 @@ func setScalingUpCondition(ctx context.Context, cluster *clusterv1.Cluster, cont ws := make([]aggregationWrapper, 0, len(machinePools.Items)+len(machineDeployments.Items)+1) if controlPlane != nil { - // control plane is considered only if it is reporting the v1beta1Condition (the contract does not require conditions to be reported) + // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) // Note: this implies that it won't surface as "Conditions ScalingUp not yet reported from ...". if c, err := conditions.UnstructuredGet(controlPlane, clusterv1.ScalingUpCondition); err == nil && c != nil { ws = append(ws, aggregationWrapper{cp: controlPlane}) @@ -905,7 +905,7 @@ func setScalingUpCondition(ctx context.Context, cluster *clusterv1.Cluster, cont scalingUpCondition, err := conditions.NewAggregateCondition( ws, clusterv1.ScalingUpCondition, conditions.TargetConditionType(clusterv1.ClusterScalingUpCondition), - // Instruct aggregate to consider ScalingUp v1beta1Condition with negative polarity. + // Instruct aggregate to consider ScalingUp condition with negative polarity. 
conditions.NegativePolarityConditionTypes{clusterv1.ScalingUpCondition}, // Using a custom merge strategy to override reasons applied during merge and to ensure merge // takes into account the fact the ScalingUp has negative polarity. @@ -951,7 +951,7 @@ func setScalingDownCondition(ctx context.Context, cluster *clusterv1.Cluster, co ws := make([]aggregationWrapper, 0, len(machinePools.Items)+len(machineDeployments.Items)+1) if controlPlane != nil { - // control plane is considered only if it is reporting the v1beta1Condition (the contract does not require conditions to be reported) + // control plane is considered only if it is reporting the condition (the contract does not require conditions to be reported) // Note: this implies that it won't surface as "Conditions ScalingDown not yet reported from ...". if c, err := conditions.UnstructuredGet(controlPlane, clusterv1.ScalingDownCondition); err == nil && c != nil { ws = append(ws, aggregationWrapper{cp: controlPlane}) @@ -982,7 +982,7 @@ func setScalingDownCondition(ctx context.Context, cluster *clusterv1.Cluster, co scalingDownCondition, err := conditions.NewAggregateCondition( ws, clusterv1.ScalingDownCondition, conditions.TargetConditionType(clusterv1.ClusterScalingDownCondition), - // Instruct aggregate to consider ScalingDown v1beta1Condition with negative polarity. + // Instruct aggregate to consider ScalingDown condition with negative polarity. conditions.NegativePolarityConditionTypes{clusterv1.ScalingDownCondition}, // Using a custom merge strategy to override reasons applied during merge and to ensure merge // takes into account the fact the ScalingDown has negative polarity. @@ -1048,7 +1048,7 @@ func (c clusterConditionCustomMergeStrategy) Merge(operation conditions.MergeOpe } } - // Treat all reasons except TopologyReconcileFailed and ClusterClassNotReconciled of TopologyReconciled v1beta1Condition as info. + // Treat all reasons except TopologyReconcileFailed and ClusterClassNotReconciled of TopologyReconciled condition as info. if condition.Type == clusterv1.ClusterTopologyReconciledCondition && condition.Status == metav1.ConditionFalse && condition.Reason != clusterv1.ClusterTopologyReconciledFailedReason && condition.Reason != clusterv1.ClusterTopologyReconciledClusterClassNotReconciledReason { return conditions.InfoMergePriority @@ -1088,14 +1088,14 @@ func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster, clus summaryOpts := []conditions.SummaryOption{ forConditionTypes, - // Instruct summary to consider Deleting v1beta1Condition with negative polarity. + // Instruct summary to consider Deleting condition with negative polarity. conditions.NegativePolarityConditionTypes{clusterv1.ClusterDeletingCondition}, // Using a custom merge strategy to override reasons applied during merge and to ignore some - // info message so the available v1beta1Condition is less noisy. + // info message so the available condition is less noisy. 
conditions.CustomMergeStrategy{ MergeStrategy: clusterConditionCustomMergeStrategy{ cluster: cluster, - // Instruct merge to consider Deleting v1beta1Condition with negative polarity, + // Instruct merge to consider Deleting condition with negative polarity, negativePolarityConditionTypes: negativePolarityConditionTypes, }, }, @@ -1109,7 +1109,7 @@ func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster, clus if err != nil { // Note, this could only happen if we hit edge cases in computing the summary, which should not happen due to the fact // that we are passing a non empty list of ForConditionTypes. - log.Error(err, "Failed to set Available v1beta1Condition") + log.Error(err, "Failed to set Available condition") availableCondition = &metav1.Condition{ Type: clusterv1.ClusterAvailableCondition, Status: metav1.ConditionUnknown, diff --git a/internal/controllers/machine/machine_controller_status.go b/internal/controllers/machine/machine_controller_status.go index 0074e52cdc76..f278d0661ed9 100644 --- a/internal/controllers/machine/machine_controller_status.go +++ b/internal/controllers/machine/machine_controller_status.go @@ -42,7 +42,6 @@ import ( // machine being partially deleted but also for running machines being disrupted e.g. by deleting the node. // Additionally, this func should ensure that the conditions managed by this controller are always set in order to // comply with the recommendation in the Kubernetes API guidelines. -// Note: v1beta1 conditions are not managed by this func. func (r *Reconciler) updateStatus(ctx context.Context, s *scope) { // Update status from the Bootstrap Config external resource. // Note: some of the status fields derived from the Bootstrap Config are managed in reconcileBootstrap, e.g. status.BootstrapReady, etc. @@ -343,7 +342,7 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl } conditions.Set(machine, *nodeReady) - status, reason, message := summarizeNodeV1Beta2Conditions(ctx, node) + status, reason, message := summarizeNodeConditions(ctx, node) conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineNodeHealthyCondition, Status: status, @@ -373,8 +372,8 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl // Report an issue if node missing after being initialized. if machine.Status.NodeRef != nil { - // Setting MachineNodeHealthyV1Beta2Condition to False to give it more relevance in the Ready condition summary. - // Setting MachineNodeReadyV1Beta2Condition to False to keep it consistent with MachineNodeHealthyV1Beta2Condition. + // Setting MachineNodeHealthyCondition to False to give it more relevance in the Ready condition summary. + // Setting MachineNodeReadyCondition to False to keep it consistent with MachineNodeHealthyCondition. setNodeConditions(machine, metav1.ConditionFalse, clusterv1.MachineNodeDeletedReason, fmt.Sprintf("Node %s has been deleted while the Machine still exists", machine.Status.NodeRef.Name)) @@ -421,10 +420,10 @@ func maxTime(t1, t2 time.Time) time.Time { return t2 } -// summarizeNodeV1Beta2Conditions summarizes a Node's conditions (NodeReady, NodeMemoryPressure, NodeDiskPressure, NodePIDPressure). +// summarizeNodeConditions summarizes a Node's conditions (NodeReady, NodeMemoryPressure, NodeDiskPressure, NodePIDPressure). // the summary is computed in way that is similar to how conditions.NewSummaryCondition works, but in this case the // implementation is simpler/less flexible and it surfaces only issues & unknown conditions. 
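// Illustrative sketch (not part of this patch) of the summarization shape described above; it only
// derives the summary status, while the real function also returns a reason and an aggregated message.
// Assumes corev1 = "k8s.io/api/core/v1" and metav1 = "k8s.io/apimachinery/pkg/apis/meta/v1".
func summarizeNodeConditionsSketch(node *corev1.Node) metav1.ConditionStatus {
	issues, unknown := 0, 0
	for _, c := range node.Status.Conditions {
		switch c.Type {
		case corev1.NodeReady, corev1.NodeMemoryPressure, corev1.NodeDiskPressure, corev1.NodePIDPressure:
		default:
			continue
		}
		// NodeReady has positive polarity (True is healthy); the pressure conditions have negative polarity (False is healthy).
		healthy := corev1.ConditionTrue
		if c.Type != corev1.NodeReady {
			healthy = corev1.ConditionFalse
		}
		switch c.Status {
		case healthy:
			// Healthy, nothing to surface.
		case corev1.ConditionUnknown:
			unknown++
		default:
			issues++
		}
	}
	if issues > 0 {
		return metav1.ConditionFalse
	}
	if unknown > 0 {
		return metav1.ConditionUnknown
	}
	return metav1.ConditionTrue
}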
-func summarizeNodeV1Beta2Conditions(_ context.Context, node *corev1.Node) (metav1.ConditionStatus, string, string) { +func summarizeNodeConditions(_ context.Context, node *corev1.Node) (metav1.ConditionStatus, string, string) { semanticallyFalseStatus := 0 unknownStatus := 0 @@ -511,7 +510,7 @@ func (c machineConditionCustomMergeStrategy) Merge(operation conditions.MergeOpe if condition.Type == clusterv1.MachineNodeHealthyCondition && (condition.Reason == clusterv1.MachineNodeDeletedReason || condition.Reason == clusterv1.MachineNodeDoesNotExistReason) { return conditions.InfoMergePriority } - // Note: MachineNodeReadyV1Beta2Condition is not relevant for the summary. + // Note: MachineNodeReadyCondition is not relevant for the summary. } return conditions.GetDefaultMergePriorityFunc(c.negativePolarityConditionTypes...)(condition) }), diff --git a/internal/controllers/machine/machine_controller_status_test.go b/internal/controllers/machine/machine_controller_status_test.go index 227a29a9f5b4..10485b0a90ed 100644 --- a/internal/controllers/machine/machine_controller_status_test.go +++ b/internal/controllers/machine/machine_controller_status_test.go @@ -624,7 +624,7 @@ func TestSummarizeNodeV1Beta2Conditions(t *testing.T) { Conditions: test.conditions, }, } - status, reason, message := summarizeNodeV1Beta2Conditions(ctx, node) + status, reason, message := summarizeNodeConditions(ctx, node) g.Expect(status).To(Equal(test.expectedStatus)) g.Expect(reason).To(Equal(test.expectedReason)) g.Expect(message).To(Equal(test.expectedMessage)) From 754995bbd2b608aef7e3ff68e6ebe412616e406b Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Fri, 25 Apr 2025 19:22:55 +0200 Subject: [PATCH 20/20] Stop using deprecated counters in controllers --- .../topologymutation_variable_types.go | 2 +- .../api/v1alpha1/zz_generated.deepcopy.go | 2 +- .../api/v1alpha1/zz_generated.openapi.go | 2 +- exp/topology/desiredstate/desired_state.go | 2 +- .../desiredstate/desired_state_test.go | 10 +- internal/contract/controlplane.go | 152 ++++++---------- internal/contract/controlplane_test.go | 167 +++--------------- internal/contract/types.go | 4 +- .../cluster/cluster_controller_status.go | 23 ++- .../cluster/cluster_controller_status_test.go | 54 +++--- .../machine/machine_controller_status.go | 4 +- .../machine/machine_controller_status_test.go | 4 +- .../machine/machine_controller_test.go | 10 +- .../machinedeployment_controller_test.go | 9 +- .../machinedeployment_rolling.go | 15 +- .../machinedeployment_rolling_test.go | 34 +--- .../machinedeployment_status.go | 12 +- .../machinedeployment_status_test.go | 2 +- .../machinedeployment_sync.go | 17 +- .../machinedeployment_sync_test.go | 37 ++-- .../machinedeployment/mdutil/util.go | 38 ++-- .../machinedeployment/mdutil/util_test.go | 18 +- .../machinedeployment/suite_test.go | 7 +- .../machineset/machineset_controller.go | 14 +- .../machineset_controller_status.go | 6 +- .../machineset/machineset_controller_test.go | 10 +- internal/controllers/machineset/suite_test.go | 7 +- .../cluster/cluster_controller_test.go | 4 +- .../topology/cluster/conditions_test.go | 105 ++++------- internal/util/tree/tree.go | 8 +- test/framework/control_plane.go | 8 +- test/framework/controlplane_helpers.go | 21 +-- test/framework/machinedeployment_helpers.go | 6 +- 33 files changed, 283 insertions(+), 531 deletions(-) diff --git a/exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go b/exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go index 
a7adb476d00e..2f4dc7ae85b1 100644 --- a/exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go +++ b/exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go @@ -125,7 +125,7 @@ type ControlPlaneBuiltins struct { // replicas is the value of the replicas field of the ControlPlane object. // +optional - Replicas *int64 `json:"replicas,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` // machineTemplate is the value of the .spec.machineTemplate field of the ControlPlane object. // +optional diff --git a/exp/runtime/hooks/api/v1alpha1/zz_generated.deepcopy.go b/exp/runtime/hooks/api/v1alpha1/zz_generated.deepcopy.go index 940210bfd13e..181317d24b9a 100644 --- a/exp/runtime/hooks/api/v1alpha1/zz_generated.deepcopy.go +++ b/exp/runtime/hooks/api/v1alpha1/zz_generated.deepcopy.go @@ -504,7 +504,7 @@ func (in *ControlPlaneBuiltins) DeepCopyInto(out *ControlPlaneBuiltins) { } if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas - *out = new(int64) + *out = new(int32) **out = **in } if in.MachineTemplate != nil { diff --git a/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go b/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go index 627c45b105eb..b0c1176a9b1f 100644 --- a/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go +++ b/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go @@ -1031,7 +1031,7 @@ func schema_runtime_hooks_api_v1alpha1_ControlPlaneBuiltins(ref common.Reference SchemaProps: spec.SchemaProps{ Description: "replicas is the value of the replicas field of the ControlPlane object.", Type: []string{"integer"}, - Format: "int64", + Format: "int32", }, }, "machineTemplate": { diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go index d997a8247521..1d3da559cc1b 100644 --- a/exp/topology/desiredstate/desired_state.go +++ b/exp/topology/desiredstate/desired_state.go @@ -354,7 +354,7 @@ func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf // NOTE: If the Topology.ControlPlane.replicas value is nil, it is assumed that the control plane controller // does not implement support for this field and the ControlPlane object is generated without the number of Replicas. if s.Blueprint.Topology.ControlPlane.Replicas != nil { - if err := contract.ControlPlane().Replicas().Set(controlPlane, int64(*s.Blueprint.Topology.ControlPlane.Replicas)); err != nil { + if err := contract.ControlPlane().Replicas().Set(controlPlane, *s.Blueprint.Topology.ControlPlane.Replicas); err != nil { return nil, errors.Wrapf(err, "failed to set %s in the ControlPlane object", contract.ControlPlane().Replicas().Path()) } } diff --git a/exp/topology/desiredstate/desired_state_test.go b/exp/topology/desiredstate/desired_state_test.go index ead1391ba2c1..0badc4dab66e 100644 --- a/exp/topology/desiredstate/desired_state_test.go +++ b/exp/topology/desiredstate/desired_state_test.go @@ -1859,13 +1859,9 @@ func TestComputeMachineDeployment(t *testing.T) { WithStatus(clusterv1.MachineDeploymentStatus{ ObservedGeneration: 2, Replicas: 2, - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - ReadyReplicas: 2, - UpdatedReplicas: 2, - AvailableReplicas: 2, - }, - }, + ReadyReplicas: ptr.To[int32](2), + UpToDateReplicas: ptr.To[int32](2), + AvailableReplicas: ptr.To[int32](2), }). 
Build() mdsState = duplicateMachineDeploymentsState(mdsState) diff --git a/internal/contract/controlplane.go b/internal/contract/controlplane.go index 1d3424b02231..85bb28c5fb9c 100644 --- a/internal/contract/controlplane.go +++ b/internal/contract/controlplane.go @@ -93,95 +93,49 @@ func (c *ControlPlaneContract) ControlPlaneEndpoint() *ControlPlaneEndpoint { // NOTE: When working with unstructured there is no way to understand if the ControlPlane provider // do support a field in the type definition from the fact that a field is not set in a given instance. // This is why in we are deriving if replicas is required from the ClusterClass in the topology reconciler code. -func (c *ControlPlaneContract) Replicas() *Int64 { - return &Int64{ +func (c *ControlPlaneContract) Replicas() *Int32 { + return &Int32{ path: []string{"spec", "replicas"}, } } // StatusReplicas provide access to the status.replicas field in a ControlPlane object, if any. Applies to implementations using replicas. -func (c *ControlPlaneContract) StatusReplicas() *Int64 { - return &Int64{ +func (c *ControlPlaneContract) StatusReplicas() *Int32 { + return &Int32{ path: []string{"status", "replicas"}, } } -// UpdatedReplicas provide access to the status.updatedReplicas field in a ControlPlane object, if any. Applies to implementations using replicas. -// TODO (v1beta2): Rename to V1Beta1DeprecatedUpdatedReplicas and make sure we are only using this method for compatibility with old contracts. -func (c *ControlPlaneContract) UpdatedReplicas(contractVersion string) *Int64 { - if contractVersion == "v1beta1" { - return &Int64{ - path: []string{"status", "updatedReplicas"}, - } - } - - return &Int64{ - path: []string{"status", "deprecated", "v1beta1", "updatedReplicas"}, - } -} - // ReadyReplicas provide access to the status.readyReplicas field in a ControlPlane object, if any. Applies to implementations using replicas. -// TODO (v1beta2): Rename to V1Beta1DeprecatedReadyReplicas and make sure we are only using this method for compatibility with old contracts. -func (c *ControlPlaneContract) ReadyReplicas(contractVersion string) *Int64 { - if contractVersion == "v1beta1" { - return &Int64{ - path: []string{"status", "readyReplicas"}, - } - } - - return &Int64{ - path: []string{"status", "deprecated", "v1beta1", "readyReplicas"}, - } -} - -// UnavailableReplicas provide access to the status.unavailableReplicas field in a ControlPlane object, if any. Applies to implementations using replicas. -// TODO (v1beta2): Rename to V1Beta1DeprecatedUnavailableReplicas and make sure we are only using this method for compatibility with old contracts. -func (c *ControlPlaneContract) UnavailableReplicas(contractVersion string) *Int64 { - if contractVersion == "v1beta1" { - return &Int64{ - path: []string{"status", "unavailableReplicas"}, - } - } - - return &Int64{ - path: []string{"status", "deprecated", "v1beta1", "unavailableReplicas"}, - } -} - -// V1Beta2ReadyReplicas provide access to the status.readyReplicas field in a ControlPlane object, if any. Applies to implementations using replicas. -// TODO (v1beta2): Drop V1Beta2 prefix.. -func (c *ControlPlaneContract) V1Beta2ReadyReplicas(contractVersion string) *Int32 { - if contractVersion == "v1beta1" { - return &Int32{ - path: []string{"status", "v1beta2", "readyReplicas"}, - } - } - +// NOTE: readyReplicas changed semantic in v1beta2 contract. 
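// Hedged usage sketch (not part of this patch) for the accessors introduced below; cp is assumed to
// be an *unstructured.Unstructured ControlPlane object already fetched by the caller:
//
//	readyReplicas, err := contract.ControlPlane().ReadyReplicas().Get(cp)
//	if errors.Is(err, contract.ErrFieldNotFound) {
//		// The provider does not (yet) report status.readyReplicas.
//	}
//
// UpToDateReplicas and AvailableReplicas are read the same way; only UpToDateReplicas takes the
// contract version, because it has to fall back to status.updatedReplicas for v1beta1 providers.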
+func (c *ControlPlaneContract) ReadyReplicas() *Int32 { return &Int32{ path: []string{"status", "readyReplicas"}, } } -// V1Beta2AvailableReplicas provide access to the status.availableReplicas field in a ControlPlane object, if any. Applies to implementations using replicas. -// TODO (v1beta2): Drop V1Beta2 prefix.x. -func (c *ControlPlaneContract) V1Beta2AvailableReplicas(contractVersion string) *Int32 { - if contractVersion == "v1beta1" { - return &Int32{ - path: []string{"status", "v1beta2", "availableReplicas"}, - } - } - +// AvailableReplicas provide access to the status.availableReplicas field in a ControlPlane object, if any. Applies to implementations using replicas. +// NOTE: availableReplicas was introduced by the v1beta2 contract; use unavailableReplicas for the v1beta1 contract. +func (c *ControlPlaneContract) AvailableReplicas() *Int32 { return &Int32{ path: []string{"status", "availableReplicas"}, } } -// V1Beta2UpToDateReplicas provide access to the status.upToDateReplicas field in a ControlPlane object, if any. Applies to implementations using replicas. -// TODO (v1beta2): Drop V1Beta2 prefix.ix. -func (c *ControlPlaneContract) V1Beta2UpToDateReplicas(contractVersion string) *Int32 { +// V1Beta1UnavailableReplicas provide access to the status.unavailableReplicas field in a ControlPlane object, if any. Applies to implementations using replicas. +// NOTE: use availableReplicas when working with the v1beta2 contract. +func (c *ControlPlaneContract) V1Beta1UnavailableReplicas() *Int64 { + return &Int64{ + path: []string{"status", "unavailableReplicas"}, + } +} + +// UpToDateReplicas provide access to the status.upToDateReplicas field in a ControlPlane object, if any. Applies to implementations using replicas. +// NOTE: upToDateReplicas was introduced by the v1beta2 contract; code will fall back to updatedReplicas for the v1beta1 contract. +func (c *ControlPlaneContract) UpToDateReplicas(contractVersion string) *Int32 { if contractVersion == "v1beta1" { return &Int32{ - path: []string{"status", "v1beta2", "upToDateReplicas"}, + path: []string{"status", "updatedReplicas"}, } } @@ -291,13 +245,9 @@ func (c *ControlPlaneContract) IsUpgrading(obj *unstructured.Unstructured) (bool func (c *ControlPlaneContract) IsScaling(obj *unstructured.Unstructured, contractVersion string) (bool, error) { desiredReplicas, err := c.Replicas().Get(obj) if err != nil { - return false, errors.Wrap(err, "failed to get control plane spec replicas") + return false, errors.Wrapf(err, "failed to get control plane %s", c.Replicas().Path().String()) } - // TODO (v1beta2): Add a new code path using v1beta2 replica counters - // note: currently we are still always using v1beta1 counters no matter if they are moved under deprecated - // but we should stop doing this ASAP - statusReplicas, err := c.StatusReplicas().Get(obj) if err != nil { if errors.Is(err, ErrFieldNotFound) { @@ -306,10 +256,10 @@ func (c *ControlPlaneContract) IsScaling(obj *unstructured.Unstructured, contrac // so that we can block any operations that expect control plane to be stable. 
return true, nil } - return false, errors.Wrap(err, "failed to get control plane status replicas") + return false, errors.Wrapf(err, "failed to get control plane %s", c.StatusReplicas().Path().String()) } - updatedReplicas, err := c.UpdatedReplicas(contractVersion).Get(obj) + upToDateReplicas, err := c.UpToDateReplicas(contractVersion).Get(obj) if err != nil { if errors.Is(err, ErrFieldNotFound) { // If updatedReplicas is not set on the control plane @@ -317,10 +267,10 @@ func (c *ControlPlaneContract) IsScaling(obj *unstructured.Unstructured, contrac // we block any operation that expect the control plane to be stable. return true, nil } - return false, errors.Wrap(err, "failed to get control plane status updatedReplicas") + return false, errors.Wrapf(err, "failed to get control plane %s", c.UpToDateReplicas(contractVersion).Path().String()) } - readyReplicas, err := c.ReadyReplicas(contractVersion).Get(obj) + readyReplicas, err := c.ReadyReplicas().Get(obj) if err != nil { if errors.Is(err, ErrFieldNotFound) { // If readyReplicas is not set on the control plane @@ -328,32 +278,46 @@ func (c *ControlPlaneContract) IsScaling(obj *unstructured.Unstructured, contrac // we block any operation that expect the control plane to be stable. return true, nil } - return false, errors.Wrap(err, "failed to get control plane status readyReplicas") + return false, errors.Wrapf(err, "failed to get control plane %s", c.ReadyReplicas().Path().String()) } - unavailableReplicas, err := c.UnavailableReplicas(contractVersion).Get(obj) - if err != nil { - if !errors.Is(err, ErrFieldNotFound) { - return false, errors.Wrap(err, "failed to get control plane status unavailableReplicas") + var availableReplicas *int32 + if contractVersion == "v1beta1" { + unavailableReplicas, err := c.V1Beta1UnavailableReplicas().Get(obj) + if err != nil { + if !errors.Is(err, ErrFieldNotFound) { + return false, errors.Wrapf(err, "failed to get control plane %s", c.V1Beta1UnavailableReplicas().Path().String()) + } + // If unavailableReplicas is not set on the control plane we assume it is 0. + // We have to do this as the following happens after clusterctl move with KCP: + // * clusterctl move creates the KCP object without status + // * the KCP controller won't patch the field to 0 if it doesn't exist + // * This is because the patchHelper marshals before/after object to JSON to calculate a diff + // and as the unavailableReplicas field is not a pointer, not set and 0 are both rendered as 0. + // If before/after of the field is the same (i.e. 0), there is no diff and thus also no patch to set it to 0. + unavailableReplicas = ptr.To[int64](0) + } + availableReplicas = ptr.To(*desiredReplicas - int32(*unavailableReplicas)) + } else { + availableReplicas, err = c.AvailableReplicas().Get(obj) + if err != nil { + if errors.Is(err, ErrFieldNotFound) { + // If availableReplicas is not set on the control plane + // we should consider the control plane to be scaling so that + // we block any operation that expect the control plane to be stable. + return true, nil + } + return false, errors.Wrapf(err, "failed to get control plane %s", c.AvailableReplicas().Path().String()) } - // If unavailableReplicas is not set on the control plane we assume it is 0.
- // We have to do this as the following happens after clusterctl move with KCP: - // * clusterctl move creates the KCP object without status - // * the KCP controller won't patch the field to 0 if it doesn't exist - // * This is because the patchHelper marshals before/after object to JSON to calculate a diff - // and as the unavailableReplicas field is not a pointer, not set and 0 are both rendered as 0. - // If before/after of the field is the same (i.e. 0), there is no diff and thus also no patch to set it to 0. - unavailableReplicas = ptr.To[int64](0) } // Control plane is still scaling if: - // * .spec.replicas, .status.replicas, .status.updatedReplicas, - // .status.readyReplicas are not equal and - // * unavailableReplicas > 0 + // * .spec.replicas, .status.replicas, .status.upToDateReplicas, + // .status.readyReplicas, .status.availableReplicas are not equal. if *statusReplicas != *desiredReplicas || - *updatedReplicas != *desiredReplicas || + *upToDateReplicas != *desiredReplicas || *readyReplicas != *desiredReplicas || - *unavailableReplicas > 0 { + *availableReplicas != *desiredReplicas { return true, nil } return false, nil diff --git a/internal/contract/controlplane_test.go index 12d1e3ee853a..a1c973c08d85 100644 --- a/internal/contract/controlplane_test.go +++ b/internal/contract/controlplane_test.go @@ -103,200 +103,85 @@ func TestControlPlane(t *testing.T) { g.Expect(ControlPlane().Replicas().Path()).To(Equal(Path{"spec", "replicas"})) - err := ControlPlane().Replicas().Set(obj, int64(3)) + err := ControlPlane().Replicas().Set(obj, int32(3)) g.Expect(err).ToNot(HaveOccurred()) got, err := ControlPlane().Replicas().Get(obj) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int64(3))) + g.Expect(*got).To(Equal(int32(3))) }) t.Run("Manages status.replicas", func(t *testing.T) { g := NewWithT(t) g.Expect(ControlPlane().StatusReplicas().Path()).To(Equal(Path{"status", "replicas"})) - err := ControlPlane().StatusReplicas().Set(obj, int64(3)) + err := ControlPlane().StatusReplicas().Set(obj, int32(3)) g.Expect(err).ToNot(HaveOccurred()) got, err := ControlPlane().StatusReplicas().Get(obj) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int64(3))) - }) - t.Run("Manages status.updatedreplicas", func(t *testing.T) { - g := NewWithT(t) - - g.Expect(ControlPlane().UpdatedReplicas("v1beta1").Path()).To(Equal(Path{"status", "updatedReplicas"})) - - err := ControlPlane().UpdatedReplicas("v1beta1").Set(obj, int64(3)) - g.Expect(err).ToNot(HaveOccurred()) - - got, err := ControlPlane().UpdatedReplicas("v1beta1").Get(obj) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int64(3))) - - g.Expect(ControlPlane().UpdatedReplicas("v1beta2").Path()).To(Equal(Path{"status", "deprecated", "v1beta1", "updatedReplicas"})) - - obj := &unstructured.Unstructured{Object: map[string]interface{}{ - "status": map[string]interface{}{ - "deprecated": map[string]interface{}{ - "v1beta1": map[string]interface{}{ - "updatedReplicas": int64(5), - }, - }, - }, - }} - - got, err = ControlPlane().UpdatedReplicas("v1beta2").Get(obj) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int64(5))) + g.Expect(*got).To(Equal(int32(3))) }) - t.Run("Manages status.readyReplicas", func(t *testing.T) { + t.Run("Manages status.upToDateReplicas", func(t *testing.T) { g := NewWithT(t) -
g.Expect(ControlPlane().ReadyReplicas("v1beta1").Path()).To(Equal(Path{"status", "readyReplicas"})) + g.Expect(ControlPlane().UpToDateReplicas("v1beta1").Path()).To(Equal(Path{"status", "updatedReplicas"})) - err := ControlPlane().ReadyReplicas("v1beta1").Set(obj, int64(3)) + err := ControlPlane().UpToDateReplicas("v1beta1").Set(obj, int32(3)) g.Expect(err).ToNot(HaveOccurred()) - got, err := ControlPlane().ReadyReplicas("v1beta1").Get(obj) + got, err := ControlPlane().UpToDateReplicas("v1beta1").Get(obj) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int64(3))) - - g.Expect(ControlPlane().ReadyReplicas("v1beta2").Path()).To(Equal(Path{"status", "deprecated", "v1beta1", "readyReplicas"})) - - obj := &unstructured.Unstructured{Object: map[string]interface{}{ - "status": map[string]interface{}{ - "deprecated": map[string]interface{}{ - "v1beta1": map[string]interface{}{ - "readyReplicas": int64(5), - }, - }, - }, - }} - - got, err = ControlPlane().ReadyReplicas("v1beta2").Get(obj) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int64(5))) - }) - t.Run("Manages status.unavailableReplicas", func(t *testing.T) { - g := NewWithT(t) - - g.Expect(ControlPlane().UnavailableReplicas("v1beta1").Path()).To(Equal(Path{"status", "unavailableReplicas"})) + g.Expect(*got).To(Equal(int32(3))) - err := ControlPlane().UnavailableReplicas("v1beta1").Set(obj, int64(3)) - g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ControlPlane().UpToDateReplicas("v1beta2").Path()).To(Equal(Path{"status", "upToDateReplicas"})) - got, err := ControlPlane().UnavailableReplicas("v1beta1").Get(obj) + err = ControlPlane().UpToDateReplicas("v1beta2").Set(obj, int32(5)) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int64(3))) - - g.Expect(ControlPlane().UnavailableReplicas("v1beta2").Path()).To(Equal(Path{"status", "deprecated", "v1beta1", "unavailableReplicas"})) - obj := &unstructured.Unstructured{Object: map[string]interface{}{ - "status": map[string]interface{}{ - "deprecated": map[string]interface{}{ - "v1beta1": map[string]interface{}{ - "unavailableReplicas": int64(5), - }, - }, - }, - }} - - got, err = ControlPlane().UnavailableReplicas("v1beta2").Get(obj) + got, err = ControlPlane().UpToDateReplicas("v1beta2").Get(obj) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int64(5))) + g.Expect(*got).To(Equal(int32(5))) }) - t.Run("Manages status.readyReplicas for v1beta2 status", func(t *testing.T) { + t.Run("Manages status.readyReplicas", func(t *testing.T) { g := NewWithT(t) - g.Expect(ControlPlane().V1Beta2ReadyReplicas("v1beta1").Path()).To(Equal(Path{"status", "v1beta2", "readyReplicas"})) + g.Expect(ControlPlane().ReadyReplicas().Path()).To(Equal(Path{"status", "readyReplicas"})) - obj := &unstructured.Unstructured{Object: map[string]interface{}{ - "status": map[string]interface{}{ - "readyReplicas": int64(3), - "v1beta2": map[string]interface{}{ - "readyReplicas": int64(5), - }, - }, - }} - - got, err := ControlPlane().V1Beta2ReadyReplicas("v1beta1").Get(obj) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int32(5))) - - g.Expect(ControlPlane().V1Beta2ReadyReplicas("v1beta2").Path()).To(Equal(Path{"status", "readyReplicas"})) - - err = ControlPlane().V1Beta2ReadyReplicas("v1beta2").Set(obj, 3) + err := ControlPlane().ReadyReplicas().Set(obj, int32(3)) g.Expect(err).ToNot(HaveOccurred()) - got, err 
= ControlPlane().V1Beta2ReadyReplicas("v1beta2").Get(obj) + got, err := ControlPlane().ReadyReplicas().Get(obj) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).ToNot(BeNil()) g.Expect(*got).To(Equal(int32(3))) }) - t.Run("Manages status.availableReplicas for v1beta2 status", func(t *testing.T) { + t.Run("Manages status.V1Beta1UnavailableReplicas", func(t *testing.T) { g := NewWithT(t) - g.Expect(ControlPlane().V1Beta2AvailableReplicas("v1beta1").Path()).To(Equal(Path{"status", "v1beta2", "availableReplicas"})) - - obj := &unstructured.Unstructured{Object: map[string]interface{}{ - "status": map[string]interface{}{ - "availableReplicas": int64(3), - "v1beta2": map[string]interface{}{ - "availableReplicas": int64(5), - }, - }, - }} + g.Expect(ControlPlane().V1Beta1UnavailableReplicas().Path()).To(Equal(Path{"status", "unavailableReplicas"})) - got, err := ControlPlane().V1Beta2AvailableReplicas("v1beta1").Get(obj) + err := ControlPlane().V1Beta1UnavailableReplicas().Set(obj, int64(3)) g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int32(5))) - - g.Expect(ControlPlane().V1Beta2AvailableReplicas("v1beta2").Path()).To(Equal(Path{"status", "availableReplicas"})) - err = ControlPlane().V1Beta2AvailableReplicas("v1beta2").Set(obj, 3) - g.Expect(err).ToNot(HaveOccurred()) - - got, err = ControlPlane().V1Beta2AvailableReplicas("v1beta2").Get(obj) + got, err := ControlPlane().V1Beta1UnavailableReplicas().Get(obj) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int32(3))) + g.Expect(*got).To(Equal(int64(3))) }) - t.Run("Manages status.upToDateReplicas for v1beta2 status", func(t *testing.T) { + t.Run("Manages status.availableReplicas", func(t *testing.T) { g := NewWithT(t) - g.Expect(ControlPlane().V1Beta2UpToDateReplicas("v1beta1").Path()).To(Equal(Path{"status", "v1beta2", "upToDateReplicas"})) - - obj := &unstructured.Unstructured{Object: map[string]interface{}{ - "status": map[string]interface{}{ - "upToDateReplicas": int64(3), - "v1beta2": map[string]interface{}{ - "upToDateReplicas": int64(5), - }, - }, - }} - - got, err := ControlPlane().V1Beta2UpToDateReplicas("v1beta1").Get(obj) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).ToNot(BeNil()) - g.Expect(*got).To(Equal(int32(5))) - - g.Expect(ControlPlane().V1Beta2UpToDateReplicas("v1beta2").Path()).To(Equal(Path{"status", "upToDateReplicas"})) + g.Expect(ControlPlane().AvailableReplicas().Path()).To(Equal(Path{"status", "availableReplicas"})) - err = ControlPlane().V1Beta2UpToDateReplicas("v1beta2").Set(obj, 3) + err := ControlPlane().AvailableReplicas().Set(obj, int32(3)) g.Expect(err).ToNot(HaveOccurred()) - got, err = ControlPlane().V1Beta2UpToDateReplicas("v1beta2").Get(obj) + got, err := ControlPlane().AvailableReplicas().Get(obj) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).ToNot(BeNil()) g.Expect(*got).To(Equal(int32(3))) diff --git a/internal/contract/types.go b/internal/contract/types.go index eeaf809ac055..6dbdae7ebad9 100644 --- a/internal/contract/types.go +++ b/internal/contract/types.go @@ -127,8 +127,8 @@ func (i *Int32) Get(obj *unstructured.Unstructured) (*int32, error) { // Set sets the int32 value in the path. // Note: Cluster API should never Set values on external objects owner by providers; however this method is useful for writing tests. 
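// Illustrative test usage (a sketch, not part of this patch); obj is a hypothetical
// *unstructured.Unstructured built in a test, and g is a Gomega instance:
//
//	obj := &unstructured.Unstructured{Object: map[string]interface{}{}}
//	g.Expect(ControlPlane().Replicas().Set(obj, int32(3))).To(Succeed())
//	got, err := ControlPlane().Replicas().Get(obj)
//	g.Expect(err).ToNot(HaveOccurred())
//	g.Expect(*got).To(Equal(int32(3)))
//
// The int64 conversion below is needed because unstructured.SetNestedField only accepts
// JSON-compatible values (int64, not int32).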
-func (i *Int32) Set(obj *unstructured.Unstructured, value int64) error { - if err := unstructured.SetNestedField(obj.UnstructuredContent(), value, i.path...); err != nil { +func (i *Int32) Set(obj *unstructured.Unstructured, value int32) error { + if err := unstructured.SetNestedField(obj.UnstructuredContent(), int64(value), i.path...); err != nil { return errors.Wrapf(err, "failed to set path %s of object %v", "."+strings.Join(i.path, "."), obj.GroupVersionKind()) } return nil diff --git a/internal/controllers/cluster/cluster_controller_status.go b/internal/controllers/cluster/cluster_controller_status.go index 294ed8348e10..a059f64bbd0b 100644 --- a/internal/controllers/cluster/cluster_controller_status.go +++ b/internal/controllers/cluster/cluster_controller_status.go @@ -109,18 +109,25 @@ func setControlPlaneReplicas(_ context.Context, cluster *clusterv1.Cluster, cont } if replicas, err := contract.ControlPlane().Replicas().Get(controlPlane); err == nil && replicas != nil { - cluster.Status.ControlPlane.DesiredReplicas = ptr.To(int32(*replicas)) + cluster.Status.ControlPlane.DesiredReplicas = ptr.To(*replicas) } if replicas, err := contract.ControlPlane().StatusReplicas().Get(controlPlane); err == nil && replicas != nil { - cluster.Status.ControlPlane.Replicas = ptr.To(int32(*replicas)) + cluster.Status.ControlPlane.Replicas = ptr.To(*replicas) } - if replicas, err := contract.ControlPlane().V1Beta2ReadyReplicas(controlPlaneContractVersion).Get(controlPlane); err == nil && replicas != nil { + if replicas, err := contract.ControlPlane().ReadyReplicas().Get(controlPlane); err == nil && replicas != nil { cluster.Status.ControlPlane.ReadyReplicas = replicas } - if replicas, err := contract.ControlPlane().V1Beta2AvailableReplicas(controlPlaneContractVersion).Get(controlPlane); err == nil && replicas != nil { - cluster.Status.ControlPlane.AvailableReplicas = replicas + if controlPlaneContractVersion == "v1beta1" { + if replicas, err := contract.ControlPlane().V1Beta1UnavailableReplicas().Get(controlPlane); err == nil && replicas != nil { + cluster.Status.ControlPlane.AvailableReplicas = ptr.To(ptr.Deref(cluster.Status.ControlPlane.DesiredReplicas, 0) - int32(ptr.Deref(replicas, 0))) + } + } else { + if replicas, err := contract.ControlPlane().AvailableReplicas().Get(controlPlane); err == nil && replicas != nil { + cluster.Status.ControlPlane.AvailableReplicas = replicas + } } - if replicas, err := contract.ControlPlane().V1Beta2UpToDateReplicas(controlPlaneContractVersion).Get(controlPlane); err == nil && replicas != nil { + + if replicas, err := contract.ControlPlane().UpToDateReplicas(controlPlaneContractVersion).Get(controlPlane); err == nil && replicas != nil { cluster.Status.ControlPlane.UpToDateReplicas = replicas } return @@ -1132,14 +1139,14 @@ func infrastructureReadyFallBackMessage(kind string, ready bool) string { if ready { return "" } - return fmt.Sprintf("%s status.ready is %t", kind, ready) + return fmt.Sprintf("%s status.initialization.provisioned is %t", kind, ready) } func controlPlaneAvailableFallBackMessage(kind string, ready bool) string { if ready { return "" } - return fmt.Sprintf("%s status.ready is %t", kind, ready) + return fmt.Sprintf("%s status.initialization.controlPlaneInitialized is %t", kind, ready) } func aggregateUnhealthyMachines(machines collections.Machines) string { diff --git a/internal/controllers/cluster/cluster_controller_status_test.go b/internal/controllers/cluster/cluster_controller_status_test.go index 0b98fc0c073c..0d125e10ee22 100644 --- 
a/internal/controllers/cluster/cluster_controller_status_test.go +++ b/internal/controllers/cluster/cluster_controller_status_test.go @@ -67,7 +67,7 @@ func TestSetControlPlaneReplicas(t *testing.T) { { name: "set counters for cluster with control plane, reporting counters", cluster: fakeCluster("c", controlPlaneRef{}), - controlPlane: fakeControlPlane("cp", desiredReplicas(3), currentReplicas(2), v1beta2ReadyReplicas(1), v1beta2AvailableReplicas(2), v1beta2UpToDateReplicas(0)), + controlPlane: fakeControlPlane("cp", desiredReplicas(3), currentReplicas(2), readyReplicas(1), availableReplicas(2), upToDateReplicas(0)), expectDesiredReplicas: ptr.To(int32(3)), expectReplicas: ptr.To(int32(2)), expectReadyReplicas: ptr.To(int32(1)), @@ -166,14 +166,14 @@ func TestSetWorkersReplicas(t *testing.T) { name: "should count workers from different objects", cluster: fakeCluster("c", controlPlaneRef{}), machinePools: expv1.MachinePoolList{Items: []expv1.MachinePool{ - *fakeMachinePool("mp1", desiredReplicas(1), currentReplicas(2), v1beta2ReadyReplicas(3), v1beta2AvailableReplicas(4), v1beta2UpToDateReplicas(5)), + *fakeMachinePool("mp1", desiredReplicas(1), currentReplicas(2), readyReplicas(3), availableReplicas(4), upToDateReplicas(5)), }}, machineDeployments: clusterv1.MachineDeploymentList{Items: []clusterv1.MachineDeployment{ - *fakeMachineDeployment("md1", desiredReplicas(11), currentReplicas(12), v1beta2ReadyReplicas(13), v1beta2AvailableReplicas(14), v1beta2UpToDateReplicas(15)), + *fakeMachineDeployment("md1", desiredReplicas(11), currentReplicas(12), readyReplicas(13), availableReplicas(14), upToDateReplicas(15)), }}, machineSets: clusterv1.MachineSetList{Items: []clusterv1.MachineSet{ - *fakeMachineSet("ms1", OwnedByCluster("c"), desiredReplicas(21), currentReplicas(22), v1beta2ReadyReplicas(23), v1beta2AvailableReplicas(24), v1beta2UpToDateReplicas(25)), - *fakeMachineSet("ms2", desiredReplicas(31), currentReplicas(32), v1beta2ReadyReplicas(33), v1beta2AvailableReplicas(34), v1beta2UpToDateReplicas(35)), // not owned by the cluster + *fakeMachineSet("ms1", OwnedByCluster("c"), desiredReplicas(21), currentReplicas(22), readyReplicas(23), availableReplicas(24), upToDateReplicas(25)), + *fakeMachineSet("ms2", desiredReplicas(31), currentReplicas(32), readyReplicas(33), availableReplicas(34), upToDateReplicas(35)), // not owned by the cluster }}, workerMachines: collections.FromMachines( // 4 replicas, 2 Ready, 3 Available, 1 UpToDate fakeMachine("m1", OwnedByCluster("c"), condition{Type: clusterv1.MachineAvailableCondition, Status: metav1.ConditionTrue}, condition{Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionTrue}, condition{Type: clusterv1.MachineUpToDateCondition, Status: metav1.ConditionTrue}), @@ -270,7 +270,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { Type: clusterv1.ClusterInfrastructureReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureNotReadyReason, - Message: "FakeInfraCluster status.ready is false", + Message: "FakeInfraCluster status.initialization.provisioned is false", }, }, { @@ -435,7 +435,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { Type: clusterv1.ClusterControlPlaneAvailableCondition, Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneNotAvailableReason, - Message: "FakeControlPlane status.ready is false", + Message: "FakeControlPlane status.initialization.controlPlaneInitialized is false", }, }, { @@ -2937,7 +2937,7 @@ func (r controlPlaneInitialized) ApplyToCluster(c 
*clusterv1.Cluster) { type desiredReplicas int32 func (r desiredReplicas) ApplyToControlPlane(cp *unstructured.Unstructured) { - _ = contract.ControlPlane().Replicas().Set(cp, int64(r)) + _ = contract.ControlPlane().Replicas().Set(cp, int32(r)) } func (r desiredReplicas) ApplyToMachinePool(mp *expv1.MachinePool) { @@ -2955,7 +2955,7 @@ func (r desiredReplicas) ApplyToMachineSet(ms *clusterv1.MachineSet) { type currentReplicas int32 func (r currentReplicas) ApplyToControlPlane(cp *unstructured.Unstructured) { - _ = contract.ControlPlane().StatusReplicas().Set(cp, int64(r)) + _ = contract.ControlPlane().StatusReplicas().Set(cp, int32(r)) } func (r currentReplicas) ApplyToMachinePool(mp *expv1.MachinePool) { @@ -2970,57 +2970,57 @@ func (r currentReplicas) ApplyToMachineSet(ms *clusterv1.MachineSet) { ms.Status.Replicas = int32(r) } -type v1beta2ReadyReplicas int32 +type readyReplicas int32 -func (r v1beta2ReadyReplicas) ApplyToControlPlane(cp *unstructured.Unstructured) { - _ = contract.ControlPlane().V1Beta2ReadyReplicas(contract.Version).Set(cp, int64(r)) +func (r readyReplicas) ApplyToControlPlane(cp *unstructured.Unstructured) { + _ = contract.ControlPlane().ReadyReplicas().Set(cp, int32(r)) } -func (r v1beta2ReadyReplicas) ApplyToMachinePool(mp *expv1.MachinePool) { +func (r readyReplicas) ApplyToMachinePool(mp *expv1.MachinePool) { mp.Status.ReadyReplicas = ptr.To(int32(r)) } -func (r v1beta2ReadyReplicas) ApplyToMachineDeployment(md *clusterv1.MachineDeployment) { +func (r readyReplicas) ApplyToMachineDeployment(md *clusterv1.MachineDeployment) { md.Status.ReadyReplicas = ptr.To(int32(r)) } -func (r v1beta2ReadyReplicas) ApplyToMachineSet(ms *clusterv1.MachineSet) { +func (r readyReplicas) ApplyToMachineSet(ms *clusterv1.MachineSet) { ms.Status.ReadyReplicas = ptr.To(int32(r)) } -type v1beta2AvailableReplicas int32 +type availableReplicas int32 -func (r v1beta2AvailableReplicas) ApplyToControlPlane(cp *unstructured.Unstructured) { - _ = contract.ControlPlane().V1Beta2AvailableReplicas(contract.Version).Set(cp, int64(r)) +func (r availableReplicas) ApplyToControlPlane(cp *unstructured.Unstructured) { + _ = contract.ControlPlane().AvailableReplicas().Set(cp, int32(r)) } -func (r v1beta2AvailableReplicas) ApplyToMachinePool(mp *expv1.MachinePool) { +func (r availableReplicas) ApplyToMachinePool(mp *expv1.MachinePool) { mp.Status.AvailableReplicas = ptr.To(int32(r)) } -func (r v1beta2AvailableReplicas) ApplyToMachineDeployment(md *clusterv1.MachineDeployment) { +func (r availableReplicas) ApplyToMachineDeployment(md *clusterv1.MachineDeployment) { md.Status.AvailableReplicas = ptr.To(int32(r)) } -func (r v1beta2AvailableReplicas) ApplyToMachineSet(ms *clusterv1.MachineSet) { +func (r availableReplicas) ApplyToMachineSet(ms *clusterv1.MachineSet) { ms.Status.AvailableReplicas = ptr.To(int32(r)) } -type v1beta2UpToDateReplicas int32 +type upToDateReplicas int32 -func (r v1beta2UpToDateReplicas) ApplyToControlPlane(cp *unstructured.Unstructured) { - _ = contract.ControlPlane().V1Beta2UpToDateReplicas(contract.Version).Set(cp, int64(r)) +func (r upToDateReplicas) ApplyToControlPlane(cp *unstructured.Unstructured) { + _ = contract.ControlPlane().UpToDateReplicas(contract.Version).Set(cp, int32(r)) } -func (r v1beta2UpToDateReplicas) ApplyToMachinePool(mp *expv1.MachinePool) { +func (r upToDateReplicas) ApplyToMachinePool(mp *expv1.MachinePool) { mp.Status.UpToDateReplicas = ptr.To(int32(r)) } -func (r v1beta2UpToDateReplicas) ApplyToMachineDeployment(md *clusterv1.MachineDeployment) { +func (r 
upToDateReplicas) ApplyToMachineDeployment(md *clusterv1.MachineDeployment) { md.Status.UpToDateReplicas = ptr.To(int32(r)) } -func (r v1beta2UpToDateReplicas) ApplyToMachineSet(ms *clusterv1.MachineSet) { +func (r upToDateReplicas) ApplyToMachineSet(ms *clusterv1.MachineSet) { ms.Status.UpToDateReplicas = ptr.To(int32(r)) } diff --git a/internal/controllers/machine/machine_controller_status.go b/internal/controllers/machine/machine_controller_status.go index f278d0661ed9..575ca851d828 100644 --- a/internal/controllers/machine/machine_controller_status.go +++ b/internal/controllers/machine/machine_controller_status.go @@ -157,7 +157,7 @@ func bootstrapConfigReadyFallBackMessage(kind string, ready bool) string { if ready { return "" } - return fmt.Sprintf("%s status.ready is %t", kind, ready) + return fmt.Sprintf("%s status.initialization.dataSecretCreated is %t", kind, ready) } func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machine, infraMachine *unstructured.Unstructured, infraMachineIsNotFound bool) { @@ -252,7 +252,7 @@ func infrastructureReadyFallBackMessage(kind string, ready bool) string { if ready { return "" } - return fmt.Sprintf("%s status.ready is %t", kind, ready) + return fmt.Sprintf("%s status.initialization.provisioned is %t", kind, ready) } func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, node *corev1.Node, nodeGetErr error, lastProbeSuccessTime time.Time, remoteConditionsGracePeriod time.Duration) { diff --git a/internal/controllers/machine/machine_controller_status_test.go b/internal/controllers/machine/machine_controller_status_test.go index 10485b0a90ed..49365be75e60 100644 --- a/internal/controllers/machine/machine_controller_status_test.go +++ b/internal/controllers/machine/machine_controller_status_test.go @@ -161,7 +161,7 @@ func TestSetBootstrapReadyCondition(t *testing.T) { Type: clusterv1.MachineBootstrapConfigReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigNotReadyReason, - Message: "GenericBootstrapConfig status.ready is false", + Message: "GenericBootstrapConfig status.initialization.dataSecretCreated is false", }, }, { @@ -385,7 +385,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { Type: clusterv1.MachineInfrastructureReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureNotReadyReason, - Message: "GenericInfrastructureMachine status.ready is false", + Message: "GenericInfrastructureMachine status.initialization.provisioned is false", }, }, { diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index 278bc63ed0d0..3153514adcb7 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -1161,8 +1161,8 @@ func TestMachineV1Beta1Conditions(t *testing.T) { infraProvisioned: false, bootstrapDataSecretCreated: true, conditionsToAssert: []metav1.Condition{ - {Type: clusterv1.MachineInfrastructureReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureNotReadyReason, Message: "GenericInfrastructureMachine status.ready is false"}, - {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotReadyReason, Message: "* InfrastructureReady: GenericInfrastructureMachine status.ready is false"}, + {Type: clusterv1.MachineInfrastructureReadyCondition, Status: metav1.ConditionFalse, 
Reason: clusterv1.MachineInfrastructureNotReadyReason, Message: "GenericInfrastructureMachine status.initialization.provisioned is false"}, + {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotReadyReason, Message: "* InfrastructureReady: GenericInfrastructureMachine status.initialization.provisioned is false"}, }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.InfrastructureReadyV1Beta1Condition, clusterv1.WaitingForInfrastructureFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), @@ -1192,8 +1192,8 @@ func TestMachineV1Beta1Conditions(t *testing.T) { infraProvisioned: true, bootstrapDataSecretCreated: false, conditionsToAssert: []metav1.Condition{ - {Type: clusterv1.MachineBootstrapConfigReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigNotReadyReason, Message: "GenericBootstrapConfig status.ready is false"}, - {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotReadyReason, Message: "* BootstrapConfigReady: GenericBootstrapConfig status.ready is false"}, + {Type: clusterv1.MachineBootstrapConfigReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigNotReadyReason, Message: "GenericBootstrapConfig status.initialization.dataSecretCreated is false"}, + {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotReadyReason, Message: "* BootstrapConfigReady: GenericBootstrapConfig status.initialization.dataSecretCreated is false"}, }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ v1beta1conditions.FalseCondition(clusterv1.BootstrapReadyV1Beta1Condition, clusterv1.WaitingForDataSecretFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, ""), @@ -1207,7 +1207,7 @@ func TestMachineV1Beta1Conditions(t *testing.T) { infraProvisioned: false, bootstrapDataSecretCreated: false, conditionsToAssert: []metav1.Condition{ - {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotReadyReason, Message: "* BootstrapConfigReady: GenericBootstrapConfig status.ready is false\n* InfrastructureReady: GenericInfrastructureMachine status.ready is false"}, + {Type: clusterv1.MachineReadyCondition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotReadyReason, Message: "* BootstrapConfigReady: GenericBootstrapConfig status.initialization.dataSecretCreated is false\n* InfrastructureReady: GenericInfrastructureMachine status.initialization.provisioned is false"}, }, v1beta1ConditionsToAssert: []*clusterv1.Condition{ // in V1beta1 ready condition summary consumes reason from the infra condition diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go index 9316bb91032e..83a798cf7068 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go @@ -65,6 +65,8 @@ func TestMachineDeploymentReconciler(t *testing.T) { // Set InfrastructureReady to true so ClusterCache creates the clusterAccessor. 
patch := client.MergeFrom(cluster.DeepCopy()) cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} + + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterControlPlaneInitializedReason, LastTransitionTime: metav1.Now()}} g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed()) return ns, cluster @@ -432,8 +434,11 @@ func TestMachineDeploymentReconciler(t *testing.T) { if !metav1.IsControlledBy(&m, newestMachineSet) { continue } - providerID := fakeInfrastructureRefProvisioned(m.Spec.InfrastructureRef, infraResource, g) - fakeMachineNodeRef(&m, providerID, g) + + if m.Status.NodeRef == nil { + providerID := fakeInfrastructureRefProvisioned(m.Spec.InfrastructureRef, infraResource, g) + fakeMachineNodeRef(&m, providerID, g) + } } if err := env.List(ctx, machineSets, msListOpts...); err != nil { diff --git a/internal/controllers/machinedeployment/machinedeployment_rolling.go b/internal/controllers/machinedeployment/machinedeployment_rolling.go index 2d3db1ceb7c0..c832cbe9f661 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rolling.go +++ b/internal/controllers/machinedeployment/machinedeployment_rolling.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -157,11 +158,7 @@ func (r *Reconciler) reconcileOldMachineSets(ctx context.Context, allMSs []*clus // * The new MachineSet created must start with 0 replicas because allMachinesCount is already at 13. // * However, newMSMachinesUnavailable would also be 0, so the 2 old MachineSets could be scaled down by 5 (13 - 8 - 0), which would then // allow the new MachineSet to be scaled up by 5. - // TODO (v1beta2) Use new replica counters - availableReplicas := int32(0) - if newMS.Status.Deprecated != nil && newMS.Status.Deprecated.V1Beta1 != nil { - availableReplicas = newMS.Status.Deprecated.V1Beta1.AvailableReplicas - } + availableReplicas := ptr.Deref(newMS.Status.AvailableReplicas, 0) minAvailable := *(deployment.Spec.Replicas) - maxUnavailable newMSUnavailableMachineCount := *(newMS.Spec.Replicas) - availableReplicas @@ -218,11 +215,7 @@ func (r *Reconciler) cleanupUnhealthyReplicas(ctx context.Context, oldMSs []*clu continue } - // TODO (v1beta2) Use new replica counters - oldMSAvailableReplicas := int32(0) - if targetMS.Status.Deprecated != nil && targetMS.Status.Deprecated.V1Beta1 != nil { - oldMSAvailableReplicas = targetMS.Status.Deprecated.V1Beta1.AvailableReplicas - } + oldMSAvailableReplicas := ptr.Deref(targetMS.Status.AvailableReplicas, 0) log.V(4).Info("Found available Machines in old MachineSet", "count", oldMSAvailableReplicas, "target-machineset", client.ObjectKeyFromObject(targetMS).String()) if oldMSReplicas == oldMSAvailableReplicas { @@ -263,7 +256,7 @@ func (r *Reconciler) scaleDownOldMachineSetsForRollingUpdate(ctx context.Context minAvailable := *(deployment.Spec.Replicas) - maxUnavailable // Find the number of available machines. - availableMachineCount := mdutil.GetAvailableReplicaCountForMachineSets(allMSs) + availableMachineCount := ptr.Deref(mdutil.GetAvailableReplicaCountForMachineSets(allMSs), 0) // Check if we can scale down. 
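	// Worked example (illustrative): with spec.replicas=3 and maxUnavailable=1, minAvailable is 2;
	// if the MachineSets currently report 2 available machines we cannot scale down (2 <= 2), while
	// with all 3 available the old MachineSets may be scaled down by at most 1 (3 - 2).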
if availableMachineCount <= minAvailable { diff --git a/internal/controllers/machinedeployment/machinedeployment_rolling_test.go b/internal/controllers/machinedeployment/machinedeployment_rolling_test.go index 696efda400ad..5977f34b7623 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rolling_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_rolling_test.go @@ -367,11 +367,7 @@ func TestReconcileOldMachineSets(t *testing.T) { Replicas: ptr.To[int32](0), }, Status: clusterv1.MachineSetStatus{ - Deprecated: &clusterv1.MachineSetDeprecatedStatus{ - V1Beta1: &clusterv1.MachineSetV1Beta1DeprecatedStatus{ - AvailableReplicas: 2, - }, - }, + AvailableReplicas: ptr.To[int32](2), }, }, oldMachineSets: []*clusterv1.MachineSet{ @@ -384,11 +380,7 @@ func TestReconcileOldMachineSets(t *testing.T) { Replicas: ptr.To[int32](2), }, Status: clusterv1.MachineSetStatus{ - Deprecated: &clusterv1.MachineSetDeprecatedStatus{ - V1Beta1: &clusterv1.MachineSetV1Beta1DeprecatedStatus{ - AvailableReplicas: 2, - }, - }, + AvailableReplicas: ptr.To[int32](2), }, }, { @@ -400,11 +392,7 @@ func TestReconcileOldMachineSets(t *testing.T) { Replicas: ptr.To[int32](1), }, Status: clusterv1.MachineSetStatus{ - Deprecated: &clusterv1.MachineSetDeprecatedStatus{ - V1Beta1: &clusterv1.MachineSetV1Beta1DeprecatedStatus{ - AvailableReplicas: 1, - }, - }, + AvailableReplicas: ptr.To[int32](1), }, }, }, @@ -438,12 +426,6 @@ func TestReconcileOldMachineSets(t *testing.T) { }, Status: clusterv1.MachineSetStatus{ Replicas: 5, - Deprecated: &clusterv1.MachineSetDeprecatedStatus{ - V1Beta1: &clusterv1.MachineSetV1Beta1DeprecatedStatus{ - ReadyReplicas: 0, - AvailableReplicas: 0, - }, - }, }, }, oldMachineSets: []*clusterv1.MachineSet{ @@ -456,13 +438,9 @@ func TestReconcileOldMachineSets(t *testing.T) { Replicas: ptr.To[int32](8), }, Status: clusterv1.MachineSetStatus{ - Replicas: 10, - Deprecated: &clusterv1.MachineSetDeprecatedStatus{ - V1Beta1: &clusterv1.MachineSetV1Beta1DeprecatedStatus{ - ReadyReplicas: 8, - AvailableReplicas: 8, - }, - }, + Replicas: 10, + ReadyReplicas: ptr.To[int32](8), + AvailableReplicas: ptr.To[int32](8), }, }, }, diff --git a/internal/controllers/machinedeployment/machinedeployment_status.go b/internal/controllers/machinedeployment/machinedeployment_status.go index dba311feb70d..00b12313669c 100644 --- a/internal/controllers/machinedeployment/machinedeployment_status.go +++ b/internal/controllers/machinedeployment/machinedeployment_status.go @@ -75,16 +75,16 @@ func (r *Reconciler) updateStatus(ctx context.Context, s *scope) (retErr error) return retErr } -// setReplicas sets replicas in the v1beta2 status. +// setReplicas sets replicas in status. // Note: this controller computes replicas several time during a reconcile, because those counters are // used by low level operations to take decisions, but also those decisions might impact the very same the counters // e.g. scale up MachinesSet is based on counters and it can change the value on MachineSet's replica number; // as a consequence it is required to compute the counters again before calling scale down machine sets, // and again to before computing the overall availability of the Machine deployment. 
func setReplicas(machineDeployment *clusterv1.MachineDeployment, machineSets []*clusterv1.MachineSet) { - machineDeployment.Status.ReadyReplicas = mdutil.GetV1Beta2ReadyReplicaCountForMachineSets(machineSets) - machineDeployment.Status.AvailableReplicas = mdutil.GetV1Beta2AvailableReplicaCountForMachineSets(machineSets) - machineDeployment.Status.UpToDateReplicas = mdutil.GetV1Beta2UptoDateReplicaCountForMachineSets(machineSets) + machineDeployment.Status.ReadyReplicas = mdutil.GetReadyReplicaCountForMachineSets(machineSets) + machineDeployment.Status.AvailableReplicas = mdutil.GetAvailableReplicaCountForMachineSets(machineSets) + machineDeployment.Status.UpToDateReplicas = mdutil.GetUptoDateReplicaCountForMachineSets(machineSets) } func setAvailableCondition(_ context.Context, machineDeployment *clusterv1.MachineDeployment, getAndAdoptMachineSetsForDeploymentSucceeded bool) { @@ -110,13 +110,13 @@ func setAvailableCondition(_ context.Context, machineDeployment *clusterv1.Machi return } - // Surface if .status.v1beta2.availableReplicas is not yet set. + // Surface if .status.availableReplicas is not yet set. if machineDeployment.Status.AvailableReplicas == nil { conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentAvailableCondition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentAvailableWaitingForAvailableReplicasSetReason, - Message: "Waiting for status.v1beta2.availableReplicas set", + Message: "Waiting for status.availableReplicas set", }) return } diff --git a/internal/controllers/machinedeployment/machinedeployment_status_test.go b/internal/controllers/machinedeployment/machinedeployment_status_test.go index f0cabbc3df6c..fea1e7535d86 100644 --- a/internal/controllers/machinedeployment/machinedeployment_status_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_status_test.go @@ -120,7 +120,7 @@ func Test_setAvailableCondition(t *testing.T) { Type: clusterv1.MachineDeploymentAvailableCondition, Status: metav1.ConditionUnknown, Reason: clusterv1.MachineDeploymentAvailableWaitingForAvailableReplicasSetReason, - Message: "Waiting for status.v1beta2.availableReplicas set", + Message: "Waiting for status.availableReplicas set", }, }, { diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index eab4e14dfc5d..2c6bdaae8cf6 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -477,18 +477,17 @@ func (r *Reconciler) scale(ctx context.Context, deployment *clusterv1.MachineDep // syncDeploymentStatus checks if the status is up-to-date and sync it if necessary. func (r *Reconciler) syncDeploymentStatus(allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet, md *clusterv1.MachineDeployment) error { - md.Status = calculateStatus(allMSs, newMS, md) + md.Status = calculateV1Beta1Status(allMSs, newMS, md) // minReplicasNeeded will be equal to md.Spec.Replicas when the strategy is not RollingUpdateMachineDeploymentStrategyType. 
minReplicasNeeded := *(md.Spec.Replicas) - mdutil.MaxUnavailable(*md) - // TODO: v1beta2 availableReplicas := int32(0) if md.Status.Deprecated != nil && md.Status.Deprecated.V1Beta1 != nil { availableReplicas = md.Status.Deprecated.V1Beta1.AvailableReplicas } if availableReplicas >= minReplicasNeeded { - // NOTE: The structure of calculateStatus() does not allow us to update the machinedeployment directly, we can only update the status obj it returns. Ideally, we should change calculateStatus() --> updateStatus() to be consistent with the rest of the code base, until then, we update conditions here. + // NOTE: The structure of calculateV1Beta1Status() does not allow us to update the machinedeployment directly, we can only update the status obj it returns. Ideally, we should change calculateV1Beta1Status() --> updateStatus() to be consistent with the rest of the code base, until then, we update conditions here. v1beta1conditions.MarkTrue(md, clusterv1.MachineDeploymentAvailableV1Beta1Condition) } else { v1beta1conditions.MarkFalse(md, clusterv1.MachineDeploymentAvailableV1Beta1Condition, clusterv1.WaitingForAvailableMachinesV1Beta1Reason, clusterv1.ConditionSeverityWarning, "Minimum availability requires %d replicas, current %d available", minReplicasNeeded, md.Status.AvailableReplicas) @@ -504,15 +503,15 @@ func (r *Reconciler) syncDeploymentStatus(allMSs []*clusterv1.MachineSet, newMS v1beta1conditions.MarkFalse(md, clusterv1.MachineSetReadyV1Beta1Condition, clusterv1.WaitingForMachineSetFallbackV1Beta1Reason, clusterv1.ConditionSeverityInfo, "MachineSet not found") } - // Set v1beta replica counters on MD status. + // Set replica counters on MD status. setReplicas(md, allMSs) return nil } -// calculateStatus calculates the latest status for the provided deployment by looking into the provided MachineSets. -func calculateStatus(allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet, deployment *clusterv1.MachineDeployment) clusterv1.MachineDeploymentStatus { - availableReplicas := mdutil.GetAvailableReplicaCountForMachineSets(allMSs) +// calculateV1Beta1Status calculates the latest status for the provided deployment by looking into the provided MachineSets. +func calculateV1Beta1Status(allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet, deployment *clusterv1.MachineDeployment) clusterv1.MachineDeploymentStatus { + availableReplicas := mdutil.GetV1Beta1AvailableReplicaCountForMachineSets(allMSs) totalReplicas := mdutil.GetReplicaCountForMachineSets(allMSs) unavailableReplicas := totalReplicas - availableReplicas @@ -525,7 +524,7 @@ func calculateStatus(allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet // Calculate the label selector. We check the error in the MD reconcile function, ignore here. selector, _ := metav1.LabelSelectorAsSelector(&deployment.Spec.Selector) - // TODO (v1beta2) Use new replica counters + // Carry over deprecated v1beta1 conditions if defined. 
var conditions clusterv1.Conditions if deployment.Status.Deprecated != nil && deployment.Status.Deprecated.V1Beta1 != nil { conditions = deployment.Status.Deprecated.V1Beta1.Conditions @@ -540,7 +539,7 @@ func calculateStatus(allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ Conditions: conditions, UpdatedReplicas: mdutil.GetActualReplicaCountForMachineSets([]*clusterv1.MachineSet{newMS}), - ReadyReplicas: mdutil.GetReadyReplicaCountForMachineSets(allMSs), + ReadyReplicas: mdutil.GetV1Beta1ReadyReplicaCountForMachineSets(allMSs), AvailableReplicas: availableReplicas, UnavailableReplicas: unavailableReplicas, }, diff --git a/internal/controllers/machinedeployment/machinedeployment_sync_test.go b/internal/controllers/machinedeployment/machinedeployment_sync_test.go index e91623d13509..8f65fa71738d 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync_test.go @@ -39,7 +39,7 @@ import ( v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) -func TestCalculateStatus(t *testing.T) { +func TestCalculateV1Beta1Status(t *testing.T) { var tests = map[string]struct { machineSets []*clusterv1.MachineSet newMachineSet *clusterv1.MachineSet @@ -217,7 +217,7 @@ func TestCalculateStatus(t *testing.T) { t.Run(name, func(t *testing.T) { g := NewWithT(t) - actualStatus := calculateStatus(test.machineSets, test.newMachineSet, test.deployment) + actualStatus := calculateV1Beta1Status(test.machineSets, test.newMachineSet, test.deployment) g.Expect(actualStatus).To(BeComparableTo(test.expectedStatus)) }) } @@ -391,7 +391,7 @@ func TestScaleMachineSet(t *testing.T) { } } -func newTestMachineDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions clusterv1.Conditions) *clusterv1.MachineDeployment { +func newTestMachineDeployment(pds *int32, replicas, statusReplicas, upToDateReplicas, availableReplicas int32) *clusterv1.MachineDeployment { d := &clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "progress-test", @@ -409,21 +409,16 @@ func newTestMachineDeployment(pds *int32, replicas, statusReplicas, updatedRepli }, }, Status: clusterv1.MachineDeploymentStatus{ - Replicas: statusReplicas, - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - UpdatedReplicas: updatedReplicas, - AvailableReplicas: availableReplicas, - Conditions: conditions, - }, - }, + Replicas: statusReplicas, + UpToDateReplicas: ptr.To[int32](upToDateReplicas), + AvailableReplicas: ptr.To[int32](availableReplicas), }, } return d } // helper to create MS with given availableReplicas. 
-func newTestMachinesetWithReplicas(name string, specReplicas, statusReplicas, availableReplicas int32, conditions clusterv1.Conditions) *clusterv1.MachineSet { +func newTestMachinesetWithReplicas(name string, specReplicas, statusReplicas, availableReplicas int32, v1Beta1Conditions clusterv1.Conditions) *clusterv1.MachineSet { return &clusterv1.MachineSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -434,11 +429,11 @@ func newTestMachinesetWithReplicas(name string, specReplicas, statusReplicas, av Replicas: ptr.To[int32](specReplicas), }, Status: clusterv1.MachineSetStatus{ - Replicas: statusReplicas, + Replicas: statusReplicas, + AvailableReplicas: ptr.To[int32](availableReplicas), Deprecated: &clusterv1.MachineSetDeprecatedStatus{ V1Beta1: &clusterv1.MachineSetV1Beta1DeprecatedStatus{ - AvailableReplicas: availableReplicas, - Conditions: conditions, + Conditions: v1Beta1Conditions, }, }, }, @@ -456,9 +451,9 @@ func TestSyncDeploymentStatus(t *testing.T) { }{ { name: "Deployment not available: MachineDeploymentAvailableCondition should exist and be false", - d: newTestMachineDeployment(&pds, 3, 2, 2, 2, clusterv1.Conditions{}), + d: newTestMachineDeployment(&pds, 3, 2, 2, 2), oldMachineSets: []*clusterv1.MachineSet{}, - newMachineSet: newTestMachinesetWithReplicas("foo", 3, 2, 2, clusterv1.Conditions{}), + newMachineSet: newTestMachinesetWithReplicas("foo", 3, 2, 2, nil), expectedConditions: []*clusterv1.Condition{ { Type: clusterv1.MachineDeploymentAvailableV1Beta1Condition, @@ -470,9 +465,9 @@ func TestSyncDeploymentStatus(t *testing.T) { }, { name: "Deployment Available: MachineDeploymentAvailableCondition should exist and be true", - d: newTestMachineDeployment(&pds, 3, 3, 3, 3, clusterv1.Conditions{}), + d: newTestMachineDeployment(&pds, 3, 3, 3, 3), oldMachineSets: []*clusterv1.MachineSet{}, - newMachineSet: newTestMachinesetWithReplicas("foo", 3, 3, 3, clusterv1.Conditions{}), + newMachineSet: newTestMachinesetWithReplicas("foo", 3, 3, 3, nil), expectedConditions: []*clusterv1.Condition{ { Type: clusterv1.MachineDeploymentAvailableV1Beta1Condition, @@ -482,7 +477,7 @@ func TestSyncDeploymentStatus(t *testing.T) { }, { name: "MachineSet exist: MachineSetReadyCondition should exist and mirror MachineSet Ready condition", - d: newTestMachineDeployment(&pds, 3, 3, 3, 3, clusterv1.Conditions{}), + d: newTestMachineDeployment(&pds, 3, 3, 3, 3), oldMachineSets: []*clusterv1.MachineSet{}, newMachineSet: newTestMachinesetWithReplicas("foo", 3, 3, 3, clusterv1.Conditions{ { @@ -503,7 +498,7 @@ func TestSyncDeploymentStatus(t *testing.T) { }, { name: "MachineSet doesn't exist: MachineSetReadyCondition should exist and be false", - d: newTestMachineDeployment(&pds, 3, 3, 3, 3, clusterv1.Conditions{}), + d: newTestMachineDeployment(&pds, 3, 3, 3, 3), oldMachineSets: []*clusterv1.MachineSet{}, newMachineSet: nil, expectedConditions: []*clusterv1.Condition{ diff --git a/internal/controllers/machinedeployment/mdutil/util.go b/internal/controllers/machinedeployment/mdutil/util.go index 51f24a80232c..e39f07c4f82d 100644 --- a/internal/controllers/machinedeployment/mdutil/util.go +++ b/internal/controllers/machinedeployment/mdutil/util.go @@ -565,12 +565,11 @@ func TotalMachineSetsReplicaSum(machineSets []*clusterv1.MachineSet) int32 { return totalReplicas } -// GetReadyReplicaCountForMachineSets returns the number of ready machines corresponding to the given machine sets. 
-func GetReadyReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { +// GetV1Beta1ReadyReplicaCountForMachineSets returns the number of ready machines corresponding to the given machine sets. +func GetV1Beta1ReadyReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { totalReadyReplicas := int32(0) for _, ms := range machineSets { if ms != nil { - // TODO (v1beta2) Use new replica counters readyReplicas := int32(0) if ms.Status.Deprecated != nil && ms.Status.Deprecated.V1Beta1 != nil { readyReplicas = ms.Status.Deprecated.V1Beta1.ReadyReplicas @@ -581,8 +580,8 @@ func GetReadyReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int return totalReadyReplicas } -// GetAvailableReplicaCountForMachineSets returns the number of available machines corresponding to the given machine sets. -func GetAvailableReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { +// GetV1Beta1AvailableReplicaCountForMachineSets returns the number of available machines corresponding to the given machine sets. +func GetV1Beta1AvailableReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { totalAvailableReplicas := int32(0) for _, ms := range machineSets { if ms != nil { @@ -596,9 +595,9 @@ func GetAvailableReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) return totalAvailableReplicas } -// GetV1Beta2ReadyReplicaCountForMachineSets returns the number of ready machines corresponding to the given machine sets. +// GetReadyReplicaCountForMachineSets returns the number of ready machines corresponding to the given machine sets. // Note: When none of the ms.Status.V1Beta2.ReadyReplicas are set, the func returns nil. -func GetV1Beta2ReadyReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) *int32 { +func GetReadyReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) *int32 { var totalReadyReplicas *int32 for _, ms := range machineSets { if ms != nil && ms.Status.ReadyReplicas != nil { @@ -608,9 +607,9 @@ func GetV1Beta2ReadyReplicaCountForMachineSets(machineSets []*clusterv1.MachineS return totalReadyReplicas } -// GetV1Beta2AvailableReplicaCountForMachineSets returns the number of available machines corresponding to the given machine sets. +// GetAvailableReplicaCountForMachineSets returns the number of available machines corresponding to the given machine sets. // Note: When none of the ms.Status.V1Beta2.AvailableReplicas are set, the func returns nil. -func GetV1Beta2AvailableReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) *int32 { +func GetAvailableReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) *int32 { var totalAvailableReplicas *int32 for _, ms := range machineSets { if ms != nil && ms.Status.AvailableReplicas != nil { @@ -620,9 +619,9 @@ func GetV1Beta2AvailableReplicaCountForMachineSets(machineSets []*clusterv1.Mach return totalAvailableReplicas } -// GetV1Beta2UptoDateReplicaCountForMachineSets returns the number of up to date machines corresponding to the given machine sets. +// GetUptoDateReplicaCountForMachineSets returns the number of up to date machines corresponding to the given machine sets. // Note: When none of the ms.Status.V1Beta2.UpToDateReplicas are set, the func returns nil. 
-func GetV1Beta2UptoDateReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) *int32 { +func GetUptoDateReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) *int32 { var totalUpToDateReplicas *int32 for _, ms := range machineSets { if ms != nil && ms.Status.UpToDateReplicas != nil { @@ -640,15 +639,8 @@ func IsRollingUpdate(deployment *clusterv1.MachineDeployment) bool { // DeploymentComplete considers a deployment to be complete once all of its desired replicas // are updated and available, and no old machines are running. func DeploymentComplete(deployment *clusterv1.MachineDeployment, newStatus *clusterv1.MachineDeploymentStatus) bool { - // TODO (v1beta2) Use new replica counters - updatedReplicas := int32(0) - if newStatus.Deprecated != nil && newStatus.Deprecated.V1Beta1 != nil { - updatedReplicas = newStatus.Deprecated.V1Beta1.UpdatedReplicas - } - availableReplicas := int32(0) - if newStatus.Deprecated != nil && newStatus.Deprecated.V1Beta1 != nil { - availableReplicas = newStatus.Deprecated.V1Beta1.AvailableReplicas - } + updatedReplicas := ptr.Deref(newStatus.UpToDateReplicas, 0) + availableReplicas := ptr.Deref(newStatus.AvailableReplicas, 0) return updatedReplicas == *(deployment.Spec.Replicas) && newStatus.Replicas == *(deployment.Spec.Replicas) && availableReplicas == *(deployment.Spec.Replicas) && @@ -709,11 +701,7 @@ func IsSaturated(deployment *clusterv1.MachineDeployment, ms *clusterv1.MachineS if err != nil { return false } - // TODO (v1beta2) Use new replica counters - availableReplicas := int32(0) - if ms.Status.Deprecated != nil && ms.Status.Deprecated.V1Beta1 != nil { - availableReplicas = ms.Status.Deprecated.V1Beta1.AvailableReplicas - } + availableReplicas := ptr.Deref(ms.Status.AvailableReplicas, 0) return *(ms.Spec.Replicas) == *(deployment.Spec.Replicas) && int32(desired) == *(deployment.Spec.Replicas) && availableReplicas == *(deployment.Spec.Replicas) diff --git a/internal/controllers/machinedeployment/mdutil/util_test.go b/internal/controllers/machinedeployment/mdutil/util_test.go index c32a3b71d5dc..f5228b732470 100644 --- a/internal/controllers/machinedeployment/mdutil/util_test.go +++ b/internal/controllers/machinedeployment/mdutil/util_test.go @@ -770,7 +770,7 @@ func TestNewMSNewReplicas(t *testing.T) { } func TestDeploymentComplete(t *testing.T) { - deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *clusterv1.MachineDeployment { + deployment := func(desired, current, upToDate, available, maxUnavailable, maxSurge int32) *clusterv1.MachineDeployment { return &clusterv1.MachineDeployment{ Spec: clusterv1.MachineDeploymentSpec{ Replicas: &desired, @@ -783,13 +783,9 @@ func TestDeploymentComplete(t *testing.T) { }, }, Status: clusterv1.MachineDeploymentStatus{ - Replicas: current, - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - UpdatedReplicas: updated, - AvailableReplicas: available, - }, - }, + Replicas: current, + UpToDateReplicas: ptr.To[int32](upToDate), + AvailableReplicas: ptr.To[int32](available), }, } } @@ -935,11 +931,7 @@ func TestAnnotationUtils(t *testing.T) { // Test Case 2: Check if annotations reflect deployments state tMS.Annotations[clusterv1.DesiredReplicasAnnotation] = "1" - tMS.Status.Deprecated = &clusterv1.MachineSetDeprecatedStatus{ - V1Beta1: &clusterv1.MachineSetV1Beta1DeprecatedStatus{ - AvailableReplicas: 1, - }, - } + tMS.Status.AvailableReplicas = ptr.To[int32](1) tMS.Spec.Replicas = 
new(int32) *tMS.Spec.Replicas = 1 diff --git a/internal/controllers/machinedeployment/suite_test.go b/internal/controllers/machinedeployment/suite_test.go index da7ca5442ec3..b1b6d3db2e93 100644 --- a/internal/controllers/machinedeployment/suite_test.go +++ b/internal/controllers/machinedeployment/suite_test.go @@ -198,7 +198,12 @@ func fakeMachineNodeRef(m *clusterv1.Machine, pid string, g *WithT) { // Patch the node and make it look like ready. patchNode := client.MergeFrom(node.DeepCopy()) - node.Status.Conditions = append(node.Status.Conditions, corev1.NodeCondition{Type: corev1.NodeReady, Status: corev1.ConditionTrue}) + node.Status.Conditions = append(node.Status.Conditions, + corev1.NodeCondition{Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + corev1.NodeCondition{Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse}, + corev1.NodeCondition{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionFalse}, + corev1.NodeCondition{Type: corev1.NodeDiskPressure, Status: corev1.ConditionFalse}, + ) g.Expect(env.Status().Patch(ctx, node, patchNode)).To(Succeed()) // Patch the Machine. diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index 836e48658578..9ddb7796900a 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -206,7 +206,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retres ct } defer func() { - if err := r.reconcileStatus(ctx, s); err != nil { + if err := r.reconcileV1Beta1Status(ctx, s); err != nil { reterr = kerrors.NewAggregate([]error{reterr, errors.Wrapf(err, "failed to update status")}) } @@ -1143,9 +1143,9 @@ func (r *Reconciler) shouldAdopt(ms *clusterv1.MachineSet) bool { return !isDeploymentChild(ms) } -// reconcileStatus updates the Status field for the MachineSet +// reconcileV1Beta1Status updates the Status field for the MachineSet // It checks for the current state of the replicas and updates the Status of the MachineSet. -func (r *Reconciler) reconcileStatus(ctx context.Context, s *scope) error { +func (r *Reconciler) reconcileV1Beta1Status(ctx context.Context, s *scope) error { if !s.getAndAdoptMachinesForMachineSetSucceeded { return nil } @@ -1212,7 +1212,6 @@ func (r *Reconciler) reconcileStatus(ctx context.Context, s *scope) error { } newStatus.Replicas = int32(len(filteredMachines)) - // TODO (v1beta2) Use new replica counters if newStatus.Deprecated == nil { newStatus.Deprecated = &clusterv1.MachineSetDeprecatedStatus{} } @@ -1290,16 +1289,15 @@ func shouldRequeueForReplicaCountersRefresh(s *scope) ctrl.Result { // exceeds MinReadySeconds could be incorrect. // To avoid an available replica stuck in the ready state, we force a reconcile after MinReadySeconds, // at which point it should confirm any available replica to be available. - // TODO (v1beta2) Use new replica counters if s.machineSet.Spec.MinReadySeconds > 0 && - s.machineSet.Status.Deprecated.V1Beta1.ReadyReplicas == replicas && - s.machineSet.Status.Deprecated.V1Beta1.AvailableReplicas != replicas { + ptr.Deref(s.machineSet.Status.ReadyReplicas, 0) == replicas && + ptr.Deref(s.machineSet.Status.AvailableReplicas, 0) != replicas { minReadyResult := ctrl.Result{RequeueAfter: time.Duration(s.machineSet.Spec.MinReadySeconds) * time.Second} return minReadyResult } // Quickly reconcile until the nodes become Ready. 
- if s.machineSet.Status.Deprecated.V1Beta1.ReadyReplicas != replicas { + if ptr.Deref(s.machineSet.Status.ReadyReplicas, 0) != replicas { return ctrl.Result{RequeueAfter: 15 * time.Second} } diff --git a/internal/controllers/machineset/machineset_controller_status.go b/internal/controllers/machineset/machineset_controller_status.go index f48f3b0aab8a..ae26135c2d28 100644 --- a/internal/controllers/machineset/machineset_controller_status.go +++ b/internal/controllers/machineset/machineset_controller_status.go @@ -37,12 +37,8 @@ import ( // updateStatus updates MachineSet's status. // Additionally, this func should ensure that the conditions managed by this controller are always set in order to // comply with the recommendation in the Kubernetes API guidelines. -// Note: v1beta1 conditions are not managed by this func. func (r *Reconciler) updateStatus(ctx context.Context, s *scope) { - // Update the following fields in status from the machines list. - // - v1beta2.readyReplicas - // - v1beta2.availableReplicas - // - v1beta2.upToDateReplicas + // Update replica counter fields in status from the machines list. setReplicas(ctx, s.machineSet, s.machines, s.getAndAdoptMachinesForMachineSetSucceeded) // Conditions diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index 24adf69e110a..69db16dd1619 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -73,6 +73,8 @@ func TestMachineSetReconciler(t *testing.T) { // Set InfrastructureReady to true so ClusterCache creates the clusterAccessor. patch := client.MergeFrom(cluster.DeepCopy()) cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{InfrastructureProvisioned: true} + + cluster.Status.Conditions = []metav1.Condition{{Type: clusterv1.ClusterControlPlaneInitializedCondition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterControlPlaneInitializedReason, LastTransitionTime: metav1.Now()}} g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed()) return ns, cluster @@ -417,11 +419,7 @@ func TestMachineSetReconciler(t *testing.T) { if err := env.Get(ctx, key, instance); err != nil { return -1 } - availableReplicas := int32(0) - if instance.Status.Deprecated != nil && instance.Status.Deprecated.V1Beta1 != nil { - availableReplicas = instance.Status.Deprecated.V1Beta1.AvailableReplicas - } - return availableReplicas + return ptr.Deref(instance.Status.AvailableReplicas, 0) }, timeout).Should(BeEquivalentTo(replicas)) t.Log("Verifying MachineSet has MachinesCreatedCondition") @@ -1058,7 +1056,7 @@ func TestMachineSetReconciler_updateStatusResizedCondition(t *testing.T) { getAndAdoptMachinesForMachineSetSucceeded: true, } setReplicas(ctx, s.machineSet, s.machines, tc.machines != nil) - g.Expect(msr.reconcileStatus(ctx, s)).To(Succeed()) + g.Expect(msr.reconcileV1Beta1Status(ctx, s)).To(Succeed()) gotCond := v1beta1conditions.Get(tc.machineSet, clusterv1.ResizedV1Beta1Condition) g.Expect(gotCond).ToNot(BeNil()) g.Expect(gotCond.Status).To(Equal(corev1.ConditionFalse)) diff --git a/internal/controllers/machineset/suite_test.go b/internal/controllers/machineset/suite_test.go index cd09d2a1c671..ef92cd8a5707 100644 --- a/internal/controllers/machineset/suite_test.go +++ b/internal/controllers/machineset/suite_test.go @@ -208,7 +208,12 @@ func fakeMachineNodeRef(m *clusterv1.Machine, pid string, g *WithT) { // Patch the node and make it look like 
ready. patchNode := client.MergeFrom(node.DeepCopy()) - node.Status.Conditions = append(node.Status.Conditions, corev1.NodeCondition{Type: corev1.NodeReady, Status: corev1.ConditionTrue, Reason: "SomeReason"}) + node.Status.Conditions = append(node.Status.Conditions, + corev1.NodeCondition{Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + corev1.NodeCondition{Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse}, + corev1.NodeCondition{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionFalse}, + corev1.NodeCondition{Type: corev1.NodeDiskPressure, Status: corev1.ConditionFalse}, + ) g.Expect(env.Status().Patch(ctx, node, patchNode)).To(Succeed()) // Patch the Machine. diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go index dc9220cb1f9c..6efd7c63e83e 100644 --- a/internal/controllers/topology/cluster/cluster_controller_test.go +++ b/internal/controllers/topology/cluster/cluster_controller_test.go @@ -995,8 +995,8 @@ func assertControlPlaneReconcile(cluster *clusterv1.Cluster) error { } // Check for Control Plane replicase if it's set in the Cluster.Spec.Topology - if int32(*replicas) != *cluster.Spec.Topology.ControlPlane.Replicas { - return fmt.Errorf("replicas %v do not match expected %v", int32(*replicas), *cluster.Spec.Topology.ControlPlane.Replicas) + if *replicas != *cluster.Spec.Topology.ControlPlane.Replicas { + return fmt.Errorf("replicas %v do not match expected %v", *replicas, *cluster.Spec.Topology.ControlPlane.Replicas) } } clusterClass := &clusterv1.ClusterClass{} diff --git a/internal/controllers/topology/cluster/conditions_test.go b/internal/controllers/topology/cluster/conditions_test.go index 9be71c05092e..7d3172a1969f 100644 --- a/internal/controllers/topology/cluster/conditions_test.go +++ b/internal/controllers/topology/cluster/conditions_test.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilfeature "k8s.io/component-base/featuregate/testing" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -276,15 +277,10 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { Object: builder.MachineDeployment("ns1", "md0-abc123"). WithReplicas(2). WithStatus(clusterv1.MachineDeploymentStatus{ - Replicas: int32(1), - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - UpdatedReplicas: int32(1), - ReadyReplicas: int32(1), - AvailableReplicas: int32(1), - UnavailableReplicas: int32(0), - }, - }, + Replicas: int32(1), + ReadyReplicas: ptr.To[int32](1), + UpToDateReplicas: ptr.To[int32](1), + AvailableReplicas: ptr.To[int32](1), }). Build(), }, @@ -379,15 +375,10 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { Object: builder.MachineDeployment("ns1", "md0-abc123"). WithReplicas(2). WithStatus(clusterv1.MachineDeploymentStatus{ - Replicas: int32(2), - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - UpdatedReplicas: int32(2), - ReadyReplicas: int32(2), - AvailableReplicas: int32(2), - UnavailableReplicas: int32(0), - }, - }, + Replicas: int32(2), + ReadyReplicas: ptr.To[int32](2), + UpToDateReplicas: ptr.To[int32](2), + AvailableReplicas: ptr.To[int32](2), }). Build(), }, @@ -554,15 +545,10 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { }). 
WithStatus(clusterv1.MachineDeploymentStatus{ // MD is not ready because we don't have 2 updated, ready and available replicas. - Replicas: int32(2), - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - UpdatedReplicas: int32(1), - ReadyReplicas: int32(1), - AvailableReplicas: int32(1), - UnavailableReplicas: int32(0), - }, - }, + Replicas: int32(2), + ReadyReplicas: ptr.To[int32](1), + UpToDateReplicas: ptr.To[int32](1), + AvailableReplicas: ptr.To[int32](1), }). Build(), }, @@ -576,15 +562,10 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { }, }). WithStatus(clusterv1.MachineDeploymentStatus{ - Replicas: int32(2), - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - UpdatedReplicas: int32(2), - ReadyReplicas: int32(2), - AvailableReplicas: int32(2), - UnavailableReplicas: int32(0), - }, - }, + Replicas: int32(2), + ReadyReplicas: ptr.To[int32](2), + UpToDateReplicas: ptr.To[int32](2), + AvailableReplicas: ptr.To[int32](2), }). Build(), }, @@ -720,15 +701,10 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { WithReplicas(2). WithVersion("v1.22.0"). WithStatus(clusterv1.MachineDeploymentStatus{ - Replicas: int32(2), - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - UpdatedReplicas: int32(2), - ReadyReplicas: int32(2), - AvailableReplicas: int32(2), - UnavailableReplicas: int32(0), - }, - }, + Replicas: int32(2), + ReadyReplicas: ptr.To[int32](2), + UpToDateReplicas: ptr.To[int32](2), + AvailableReplicas: ptr.To[int32](2), }). Build(), }, @@ -737,15 +713,10 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { WithReplicas(2). WithVersion("v1.21.2"). WithStatus(clusterv1.MachineDeploymentStatus{ - Replicas: int32(2), - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - UpdatedReplicas: int32(2), - ReadyReplicas: int32(2), - AvailableReplicas: int32(2), - UnavailableReplicas: int32(0), - }, - }, + Replicas: int32(2), + ReadyReplicas: ptr.To[int32](2), + UpToDateReplicas: ptr.To[int32](2), + AvailableReplicas: ptr.To[int32](2), }). Build(), }, @@ -858,15 +829,10 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { WithReplicas(2). WithVersion("v1.22.0"). WithStatus(clusterv1.MachineDeploymentStatus{ - Replicas: int32(1), - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - UpdatedReplicas: int32(1), - ReadyReplicas: int32(1), - AvailableReplicas: int32(1), - UnavailableReplicas: int32(0), - }, - }, + Replicas: int32(1), + ReadyReplicas: ptr.To[int32](2), + UpToDateReplicas: ptr.To[int32](2), + AvailableReplicas: ptr.To[int32](2), }). Build(), }, @@ -875,15 +841,10 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { WithReplicas(2). WithVersion("v1.22.0"). WithStatus(clusterv1.MachineDeploymentStatus{ - Replicas: int32(2), - Deprecated: &clusterv1.MachineDeploymentDeprecatedStatus{ - V1Beta1: &clusterv1.MachineDeploymentV1Beta1DeprecatedStatus{ - UpdatedReplicas: int32(2), - ReadyReplicas: int32(2), - AvailableReplicas: int32(2), - UnavailableReplicas: int32(0), - }, - }, + Replicas: int32(2), + ReadyReplicas: ptr.To[int32](2), + UpToDateReplicas: ptr.To[int32](2), + AvailableReplicas: ptr.To[int32](2), }). 
Build(), }, diff --git a/internal/util/tree/tree.go b/internal/util/tree/tree.go index b9bd6e29e911..c89a5b6cfb0a 100644 --- a/internal/util/tree/tree.go +++ b/internal/util/tree/tree.go @@ -285,7 +285,7 @@ func addOtherConditions(prefix string, tbl *tablewriter.Table, objectTree *tree. childrenPipe = pipe } - negativePolarityConditions := sets.New( + negativePolarityConditions := sets.New[string]( clusterv1.PausedCondition, clusterv1.DeletingCondition, clusterv1.RollingOutCondition, @@ -649,13 +649,13 @@ func newRowDescriptor(obj ctrlclient.Object) rowDescriptor { } } - if c, err := contract.ControlPlane().V1Beta2AvailableReplicas(contractVersion).Get(obj); err == nil && c != nil { + if c, err := contract.ControlPlane().AvailableReplicas().Get(obj); err == nil && c != nil { v.availableCounters = fmt.Sprintf("%d", *c) } - if c, err := contract.ControlPlane().V1Beta2ReadyReplicas(contractVersion).Get(obj); err == nil && c != nil { + if c, err := contract.ControlPlane().ReadyReplicas().Get(obj); err == nil && c != nil { v.readyCounters = fmt.Sprintf("%d", *c) } - if c, err := contract.ControlPlane().V1Beta2UpToDateReplicas(contractVersion).Get(obj); err == nil && c != nil { + if c, err := contract.ControlPlane().UpToDateReplicas(contractVersion).Get(obj); err == nil && c != nil { v.upToDateCounters = fmt.Sprintf("%d", *c) } } diff --git a/test/framework/control_plane.go b/test/framework/control_plane.go index c3635bfab60f..4edcae3dd2af 100644 --- a/test/framework/control_plane.go +++ b/test/framework/control_plane.go @@ -21,6 +21,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta2" @@ -44,11 +45,6 @@ func WaitForControlPlaneToBeUpToDate(ctx context.Context, input WaitForControlPl if err := input.Getter.Get(ctx, key, controlplane); err != nil { return 0, err } - // TODO (v1beta2) Use new replica counters - updatedReplicas := int32(0) - if controlplane.Status.Deprecated != nil && controlplane.Status.Deprecated.V1Beta1 != nil { - updatedReplicas = controlplane.Status.Deprecated.V1Beta1.UpdatedReplicas - } - return updatedReplicas, nil + return ptr.Deref(controlplane.Status.UpToDateReplicas, 0), nil }, intervals...).Should(Equal(*input.ControlPlane.Spec.Replicas), "Timed waiting for all control plane replicas to be updated") } diff --git a/test/framework/controlplane_helpers.go b/test/framework/controlplane_helpers.go index 42fa046c7460..d318121a547f 100644 --- a/test/framework/controlplane_helpers.go +++ b/test/framework/controlplane_helpers.go @@ -174,24 +174,17 @@ func WaitForControlPlaneToBeReady(ctx context.Context, input WaitForControlPlane desiredReplicas := controlplane.Spec.Replicas statusReplicas := controlplane.Status.Replicas - // TODO (v1beta2) Use new replica counters - updatedReplicas := int32(0) - readyReplicas := int32(0) - unavailableReplicas := int32(0) - if controlplane.Status.Deprecated != nil && controlplane.Status.Deprecated.V1Beta1 != nil { - updatedReplicas = controlplane.Status.Deprecated.V1Beta1.UpdatedReplicas - readyReplicas = controlplane.Status.Deprecated.V1Beta1.ReadyReplicas - unavailableReplicas = controlplane.Status.Deprecated.V1Beta1.UnavailableReplicas - } + upToDateReplicas := ptr.Deref(controlplane.Status.UpToDateReplicas, 0) + readyReplicas := ptr.Deref(controlplane.Status.ReadyReplicas, 0) + availableReplicas := ptr.Deref(controlplane.Status.AvailableReplicas, 0) // Control plane is still rolling out
(and thus not ready) if: - // * .spec.replicas, .status.replicas, .status.updatedReplicas, - // .status.readyReplicas are not equal and - // * unavailableReplicas > 0 + // * .spec.replicas, .status.replicas, .status.upToDateReplicas, + // .status.readyReplicas, .status.availableReplicas are not equal. if statusReplicas != *desiredReplicas || - updatedReplicas != *desiredReplicas || + upToDateReplicas != *desiredReplicas || readyReplicas != *desiredReplicas || - unavailableReplicas > 0 { + availableReplicas != *desiredReplicas { return false, nil } diff --git a/test/framework/machinedeployment_helpers.go b/test/framework/machinedeployment_helpers.go index 786cc9d74170..324fb2713d46 100644 --- a/test/framework/machinedeployment_helpers.go +++ b/test/framework/machinedeployment_helpers.go @@ -341,10 +341,8 @@ func UpgradeMachineDeploymentInfrastructureRefAndWait(ctx context.Context, input // MachineSet should be rolled out. g.Expect(newMachineSet.Spec.Replicas).To(Equal(deployment.Spec.Replicas)) g.Expect(*newMachineSet.Spec.Replicas).To(Equal(newMachineSet.Status.Replicas)) - g.Expect(*newMachineSet.Status.Deprecated).ToNot(BeNil()) - g.Expect(*newMachineSet.Status.Deprecated.V1Beta1).ToNot(BeNil()) - g.Expect(*newMachineSet.Spec.Replicas).To(Equal(newMachineSet.Status.Deprecated.V1Beta1.ReadyReplicas)) - g.Expect(*newMachineSet.Spec.Replicas).To(Equal(newMachineSet.Status.Deprecated.V1Beta1.AvailableReplicas)) + g.Expect(newMachineSet.Spec.Replicas).To(Equal(newMachineSet.Status.ReadyReplicas)) + g.Expect(newMachineSet.Spec.Replicas).To(Equal(newMachineSet.Status.AvailableReplicas)) // MachineSet should have the same infrastructureRef as the MachineDeployment. g.Expect(newMachineSet.Spec.Template.Spec.InfrastructureRef).To(BeComparableTo(deployment.Spec.Template.Spec.InfrastructureRef))