diff --git a/api/v1beta2/awsmanagedcluster_types.go b/api/v1beta2/awsmanagedcluster_types.go
index 587ace7654..67d9b2fc92 100644
--- a/api/v1beta2/awsmanagedcluster_types.go
+++ b/api/v1beta2/awsmanagedcluster_types.go
@@ -38,6 +38,10 @@ type AWSManagedClusterStatus struct {
 	// FailureDomains specifies a list fo available availability zones that can be used
 	// +optional
 	FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+
+	// Conditions defines current service state of the AWSManagedCluster.
+	// +optional
+	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -69,3 +73,15 @@ type AWSManagedClusterList struct {
 func init() {
 	SchemeBuilder.Register(&AWSManagedCluster{}, &AWSManagedClusterList{})
 }
+
+// GetConditions returns the observations of the operational state of the
+// AWSManagedCluster resource.
+func (r *AWSManagedCluster) GetConditions() clusterv1.Conditions {
+	return r.Status.Conditions
+}
+
+// SetConditions sets the underlying service state of the AWSManagedCluster to
+// the predescribed clusterv1.Conditions.
+func (r *AWSManagedCluster) SetConditions(conditions clusterv1.Conditions) {
+	r.Status.Conditions = conditions
+}
diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go
index b66c06633a..a3ef61f24e 100644
--- a/api/v1beta2/zz_generated.deepcopy.go
+++ b/api/v1beta2/zz_generated.deepcopy.go
@@ -1024,6 +1024,13 @@ func (in *AWSManagedClusterStatus) DeepCopyInto(out *AWSManagedClusterStatus) {
 			(*out)[key] = *val.DeepCopy()
 		}
 	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make(v1beta1.Conditions, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterStatus.
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml
index d9beb6483e..ae196478db 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml
@@ -76,6 +76,51 @@ spec:
             status:
               description: AWSManagedClusterStatus defines the observed state of AWSManagedCluster
              properties:
+                conditions:
+                  description: Conditions defines current service state of the AWSManagedCluster.
+                  items:
+                    description: Condition defines an observation of a Cluster API resource
+                      operational state.
+                    properties:
+                      lastTransitionTime:
+                        description: |-
+                          Last time the condition transitioned from one status to another.
+                          This should be when the underlying condition changed. If that is not known, then using the time when
+                          the API field changed is acceptable.
+                        format: date-time
+                        type: string
+                      message:
+                        description: |-
+                          A human readable message indicating details about the transition.
+                          This field may be empty.
+                        type: string
+                      reason:
+                        description: |-
+                          The reason for the condition's last transition in CamelCase.
+                          The specific API may choose whether or not this field is considered a guaranteed API.
+                          This field may be empty.
+                        type: string
+                      severity:
+                        description: |-
+                          severity provides an explicit classification of Reason code, so the users or machines can immediately
+                          understand the current situation and act accordingly.
+                          The Severity field MUST be set only when Status=False.
+                        type: string
+                      status:
+                        description: status of the condition, one of True, False, Unknown.
+                        type: string
+                      type:
+                        description: |-
+                          type of condition in CamelCase or in foo.example.com/CamelCase.
+                          Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+                          can be useful (see .node.status.conditions), the ability to deconflict is important.
+                        type: string
+                    required:
+                    - lastTransitionTime
+                    - status
+                    - type
+                    type: object
+                  type: array
                 failureDomains:
                   additionalProperties:
                     description: |-
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml
index 489cd07dbd..c31ff0260b 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml
@@ -76,6 +76,51 @@ spec:
             status:
               description: ROSAClusterStatus defines the observed state of ROSACluster.
              properties:
+                conditions:
+                  description: Conditions defines current service state of the ROSACluster.
+                  items:
+                    description: Condition defines an observation of a Cluster API resource
+                      operational state.
+                    properties:
+                      lastTransitionTime:
+                        description: |-
+                          Last time the condition transitioned from one status to another.
+                          This should be when the underlying condition changed. If that is not known, then using the time when
+                          the API field changed is acceptable.
+                        format: date-time
+                        type: string
+                      message:
+                        description: |-
+                          A human readable message indicating details about the transition.
+                          This field may be empty.
+                        type: string
+                      reason:
+                        description: |-
+                          The reason for the condition's last transition in CamelCase.
+                          The specific API may choose whether or not this field is considered a guaranteed API.
+                          This field may be empty.
+                        type: string
+                      severity:
+                        description: |-
+                          severity provides an explicit classification of Reason code, so the users or machines can immediately
+                          understand the current situation and act accordingly.
+                          The Severity field MUST be set only when Status=False.
+                        type: string
+                      status:
+                        description: status of the condition, one of True, False, Unknown.
+                        type: string
+                      type:
+                        description: |-
+                          type of condition in CamelCase or in foo.example.com/CamelCase.
+                          Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+                          can be useful (see .node.status.conditions), the ability to deconflict is important.
+                        type: string
+                    required:
+                    - lastTransitionTime
+                    - status
+                    - type
+                    type: object
+                  type: array
                 failureDomains:
                   additionalProperties:
                     description: |-
diff --git a/controllers/awsmanagedcluster_controller.go b/controllers/awsmanagedcluster_controller.go
index 3f5c629306..17da9578be 100644
--- a/controllers/awsmanagedcluster_controller.go
+++ b/controllers/awsmanagedcluster_controller.go
@@ -35,9 +35,9 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+	"sigs.k8s.io/cluster-api-provider-aws/v2/util/paused"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -78,11 +78,6 @@ func (r *AWSManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re
 		return reconcile.Result{}, nil
 	}
 
-	if annotations.IsPaused(cluster, awsManagedCluster) {
-		log.Info("AWSManagedCluster or linked Cluster is marked as paused. Won't reconcile")
-		return reconcile.Result{}, nil
-	}
-
 	log = log.WithValues("cluster", cluster.Name)
 
 	controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
@@ -95,6 +90,10 @@ func (r *AWSManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re
 		return reconcile.Result{}, fmt.Errorf("failed to get control plane ref: %w", err)
 	}
 
+	if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, awsManagedCluster); err != nil || isPaused || conditionChanged {
+		return ctrl.Result{}, err
+	}
+
 	log = log.WithValues("controlPlane", controlPlaneRef.Name)
 
 	patchHelper, err := patch.NewHelper(awsManagedCluster, r.Client)
@@ -124,7 +123,7 @@ func (r *AWSManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr
 	controller, err := ctrl.NewControllerManagedBy(mgr).
 		WithOptions(options).
 		For(awsManagedCluster).
-		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+		WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
 		WithEventFilter(predicates.ResourceIsNotExternallyManaged(mgr.GetScheme(), log.GetLogger())).
 		Build(r)
 
@@ -132,11 +131,11 @@ func (r *AWSManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr
 		return fmt.Errorf("error creating controller: %w", err)
 	}
 
-	// Add a watch for clusterv1.Cluster unpaise
+	// Add a watch for clusterv1.Cluster unpause
 	if err = controller.Watch(
 		source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("AWSManagedCluster"), mgr.GetClient(), &infrav1.AWSManagedCluster{})),
-			predicates.ClusterUnpaused(mgr.GetScheme(), log.GetLogger())),
+			predicates.ClusterPausedTransitions(mgr.GetScheme(), log.GetLogger())),
 	); err != nil {
 		return fmt.Errorf("failed adding a watch for ready clusters: %w", err)
 	}
diff --git a/controllers/rosacluster_controller.go b/controllers/rosacluster_controller.go
index 0b061f1853..bd880fa427 100644
--- a/controllers/rosacluster_controller.go
+++ b/controllers/rosacluster_controller.go
@@ -37,9 +37,9 @@ import (
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+	"sigs.k8s.io/cluster-api-provider-aws/v2/util/paused"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -82,9 +82,8 @@ func (r *ROSAClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		return reconcile.Result{}, nil
 	}
 
-	if annotations.IsPaused(cluster, rosaCluster) {
-		log.Info("ROSACluster or linked Cluster is marked as paused. Won't reconcile")
-		return reconcile.Result{}, nil
+	if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, rosaCluster); err != nil || isPaused || conditionChanged {
+		return ctrl.Result{}, err
 	}
 
 	log = log.WithValues("cluster", cluster.Name)
@@ -127,7 +126,7 @@ func (r *ROSAClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.M
 	controller, err := ctrl.NewControllerManagedBy(mgr).
 		WithOptions(options).
 		For(rosaCluster).
-		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+		WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
 		Build(r)
 
 	if err != nil {
@@ -138,7 +137,7 @@ func (r *ROSAClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.M
 	if err = controller.Watch(
 		source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("ROSACluster"), mgr.GetClient(), &expinfrav1.ROSACluster{})),
-			predicates.ClusterUnpaused(mgr.GetScheme(), log.GetLogger())),
+			predicates.ClusterPausedTransitions(mgr.GetScheme(), log.GetLogger())),
 	); err != nil {
 		return fmt.Errorf("failed adding a watch for ready clusters: %w", err)
 	}
diff --git a/exp/api/v1beta2/rosacluster_types.go b/exp/api/v1beta2/rosacluster_types.go
index 1b3ffa5d77..3303125d1c 100644
--- a/exp/api/v1beta2/rosacluster_types.go
+++ b/exp/api/v1beta2/rosacluster_types.go
@@ -38,6 +38,10 @@ type ROSAClusterStatus struct {
 	// FailureDomains specifies a list fo available availability zones that can be used
 	// +optional
 	FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+
+	// Conditions defines current service state of the ROSACluster.
+	// +optional
+	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -66,6 +70,18 @@ type ROSAClusterList struct {
 	Items []ROSACluster `json:"items"`
 }
 
+// GetConditions returns the observations of the operational state of the
+// ROSACluster resource.
+func (r *ROSACluster) GetConditions() clusterv1.Conditions {
+	return r.Status.Conditions
+}
+
+// SetConditions sets the underlying service state of the ROSACluster to the
+// predescribed clusterv1.Conditions.
+func (r *ROSACluster) SetConditions(conditions clusterv1.Conditions) {
+	r.Status.Conditions = conditions
+}
+
 func init() {
 	SchemeBuilder.Register(&ROSACluster{}, &ROSAClusterList{})
 }
diff --git a/exp/api/v1beta2/zz_generated.deepcopy.go b/exp/api/v1beta2/zz_generated.deepcopy.go
index 3bea8430ca..3332aaeedc 100644
--- a/exp/api/v1beta2/zz_generated.deepcopy.go
+++ b/exp/api/v1beta2/zz_generated.deepcopy.go
@@ -977,6 +977,13 @@ func (in *ROSAClusterStatus) DeepCopyInto(out *ROSAClusterStatus) {
 			(*out)[key] = *val.DeepCopy()
 		}
 	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make(v1beta1.Conditions, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSAClusterStatus.
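
Reviewer notes (not part of the patch):

The behavioral core of this change: instead of silently skipping paused resources via annotations.IsPaused and pause-aware event filters, each reconcile now calls paused.EnsurePausedCondition, which records the paused state as a condition on the object and short-circuits the reconcile while paused. The util/paused helper itself is not shown in this diff; the sketch below is a hypothetical approximation of the contract it appears to satisfy. Only the call sites and the (isPaused, conditionChanged, err) return shape come from the diff — the condition type name, the function body, and the package layout here are assumptions.

package paused

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/annotations"
	"sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// pausedCondition is an illustrative name; the actual condition type is
// defined by Cluster API, not by this sketch.
const pausedCondition clusterv1.ConditionType = "Paused"

// ensurePausedCondition approximates the helper used in the controllers
// above: it mirrors "Cluster.Spec.Paused or the paused annotation" into a
// condition on obj, patches the object when the condition flips, and tells
// the caller whether to bail out of the current reconcile.
func ensurePausedCondition(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, obj conditions.Setter) (isPaused bool, conditionChanged bool, err error) {
	isPaused = annotations.IsPaused(cluster, obj)
	before := conditions.Get(obj, pausedCondition)

	// Snapshot the object before mutating it so the patch helper can diff.
	helper, err := patch.NewHelper(obj, c)
	if err != nil {
		return isPaused, false, err
	}

	if isPaused {
		conditions.MarkTrue(obj, pausedCondition)
	} else {
		conditions.MarkFalse(obj, pausedCondition, "NotPaused", clusterv1.ConditionSeverityInfo, "")
	}

	after := conditions.Get(obj, pausedCondition)
	conditionChanged = before == nil || before.Status != after.Status
	if conditionChanged {
		if err := helper.Patch(ctx, obj); err != nil {
			return isPaused, true, err
		}
	}
	return isPaused, conditionChanged, nil
}

Returning early when conditionChanged is true — even for an unpaused object — lets the next, clean reconcile run against an up-to-date object instead of mixing the condition patch with substantive changes in a single pass.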
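The new GetConditions/SetConditions accessors are what make AWSManagedCluster and ROSACluster satisfy Cluster API's conditions.Setter interface, so the shared condition helpers and the patch helper already used in these controllers can manage the new status.conditions field; the regenerated deepcopy then guarantees that copies do not alias the conditions slice. A minimal usage sketch — clusterv1.ReadyCondition is used purely for illustration and is not part of this diff:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
)

func main() {
	awsManagedCluster := &infrav1.AWSManagedCluster{}

	// MarkTrue works because AWSManagedCluster now implements the
	// conditions.Setter interface via GetConditions/SetConditions.
	conditions.MarkTrue(awsManagedCluster, clusterv1.ReadyCondition)

	// The regenerated DeepCopyInto clones the conditions slice, so
	// mutating the copy leaves the original untouched.
	copied := awsManagedCluster.DeepCopy()
	copied.Status.Conditions[0].Status = corev1.ConditionFalse

	fmt.Println(awsManagedCluster.Status.Conditions[0].Status) // True
	fmt.Println(copied.Status.Conditions[0].Status)            // False
}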
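On the watch side, predicates.ClusterUnpaused fired only on the paused→unpaused edge, and ResourceNotPausedAndHasFilterLabel dropped events for paused resources entirely. Under the condition-based scheme both edges must reach the reconciler so the Paused condition can be set as well as cleared — hence ClusterPausedTransitions plus the plain ResourceHasFilterLabel. A hand-rolled predicate in the same spirit, as an illustration of the idea only (the actual Cluster API implementation also accounts for changes to the cluster.x-k8s.io/paused annotation):

package clusterpredicates

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// pausedTransitions enqueues a Cluster whenever Spec.Paused flips in either
// direction, so the reconciler can both set and clear the Paused condition.
var pausedTransitions = predicate.Funcs{
	UpdateFunc: func(e event.UpdateEvent) bool {
		oldCluster, okOld := e.ObjectOld.(*clusterv1.Cluster)
		newCluster, okNew := e.ObjectNew.(*clusterv1.Cluster)
		if !okOld || !okNew {
			return false
		}
		return oldCluster.Spec.Paused != newCluster.Spec.Paused
	},
}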