Skip to content

✨ Updates AWSManagedCluster, ROSACluster with Paused Condition #5394

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions api/v1beta2/awsmanagedcluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,10 @@ type AWSManagedClusterStatus struct {
// FailureDomains specifies a list of available availability zones that can be used
// +optional
FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`

// Conditions defines current service state of the AWSManagedCluster.
// +optional
Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}

// +kubebuilder:object:root=true
Expand Down Expand Up @@ -69,3 +73,15 @@ type AWSManagedClusterList struct {
// init registers the AWSManagedCluster kinds with the package scheme builder
// so they can be recognized by API machinery clients.
func init() {
	SchemeBuilder.Register(
		&AWSManagedCluster{},
		&AWSManagedClusterList{},
	)
}

// GetConditions reports the conditions currently recorded on the
// AWSManagedCluster status, describing its observed operational state.
func (r *AWSManagedCluster) GetConditions() clusterv1.Conditions {
	conditions := r.Status.Conditions
	return conditions
}

// SetConditions records the given clusterv1.Conditions on the
// AWSManagedCluster status, replacing any previously stored conditions.
func (r *AWSManagedCluster) SetConditions(conditions clusterv1.Conditions) {
	r.Status.Conditions = conditions
}
7 changes: 7 additions & 0 deletions api/v1beta2/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,51 @@ spec:
status:
description: AWSManagedClusterStatus defines the observed state of AWSManagedCluster
properties:
conditions:
description: Conditions defines current service state of the AWSManagedCluster.
items:
description: Condition defines an observation of a Cluster API resource
operational state.
properties:
lastTransitionTime:
description: |-
Last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when
the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
A human readable message indicating details about the transition.
This field may be empty.
type: string
reason:
description: |-
The reason for the condition's last transition in CamelCase.
The specific API may choose whether or not this field is considered a guaranteed API.
This field may be empty.
type: string
severity:
description: |-
severity provides an explicit classification of Reason code, so the users or machines can immediately
understand the current situation and act accordingly.
The Severity field MUST be set only when Status=False.
type: string
status:
description: status of the condition, one of True, False, Unknown.
type: string
type:
description: |-
type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
- lastTransitionTime
- status
- type
type: object
type: array
failureDomains:
additionalProperties:
description: |-
Expand Down
45 changes: 45 additions & 0 deletions config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,51 @@ spec:
status:
description: ROSAClusterStatus defines the observed state of ROSACluster.
properties:
conditions:
description: Conditions defines current service state of the ROSACluster.
items:
description: Condition defines an observation of a Cluster API resource
operational state.
properties:
lastTransitionTime:
description: |-
Last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when
the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
A human readable message indicating details about the transition.
This field may be empty.
type: string
reason:
description: |-
The reason for the condition's last transition in CamelCase.
The specific API may choose whether or not this field is considered a guaranteed API.
This field may be empty.
type: string
severity:
description: |-
severity provides an explicit classification of Reason code, so the users or machines can immediately
understand the current situation and act accordingly.
The Severity field MUST be set only when Status=False.
type: string
status:
description: status of the condition, one of True, False, Unknown.
type: string
type:
description: |-
type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability to deconflict is important.
type: string
required:
- lastTransitionTime
- status
- type
type: object
type: array
failureDomains:
additionalProperties:
description: |-
Expand Down
17 changes: 8 additions & 9 deletions controllers/awsmanagedcluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,9 @@ import (
infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
"sigs.k8s.io/cluster-api-provider-aws/v2/util/paused"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/predicates"
)
Expand Down Expand Up @@ -78,11 +78,6 @@ func (r *AWSManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re
return reconcile.Result{}, nil
}

if annotations.IsPaused(cluster, awsManagedCluster) {
log.Info("AWSManagedCluster or linked Cluster is marked as paused. Won't reconcile")
return reconcile.Result{}, nil
}

log = log.WithValues("cluster", cluster.Name)

controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{}
Expand All @@ -95,6 +90,10 @@ func (r *AWSManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re
return reconcile.Result{}, fmt.Errorf("failed to get control plane ref: %w", err)
}

if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, awsManagedCluster); err != nil || isPaused || conditionChanged {
return ctrl.Result{}, err
}

log = log.WithValues("controlPlane", controlPlaneRef.Name)

patchHelper, err := patch.NewHelper(awsManagedCluster, r.Client)
Expand Down Expand Up @@ -124,19 +123,19 @@ func (r *AWSManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr
controller, err := ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(awsManagedCluster).
WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
WithEventFilter(predicates.ResourceIsNotExternallyManaged(mgr.GetScheme(), log.GetLogger())).
Build(r)

if err != nil {
return fmt.Errorf("error creating controller: %w", err)
}

// Add a watch for clusterv1.Cluster unpaise
// Add a watch for clusterv1.Cluster unpause
if err = controller.Watch(
source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{},
handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("AWSManagedCluster"), mgr.GetClient(), &infrav1.AWSManagedCluster{})),
predicates.ClusterUnpaused(mgr.GetScheme(), log.GetLogger())),
predicates.ClusterPausedTransitions(mgr.GetScheme(), log.GetLogger())),
); err != nil {
return fmt.Errorf("failed adding a watch for ready clusters: %w", err)
}
Expand Down
11 changes: 5 additions & 6 deletions controllers/rosacluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,9 @@ import (
expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
"sigs.k8s.io/cluster-api-provider-aws/v2/util/paused"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/predicates"
)
Expand Down Expand Up @@ -82,9 +82,8 @@ func (r *ROSAClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return reconcile.Result{}, nil
}

if annotations.IsPaused(cluster, rosaCluster) {
log.Info("ROSACluster or linked Cluster is marked as paused. Won't reconcile")
return reconcile.Result{}, nil
if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, rosaCluster); err != nil || isPaused || conditionChanged {
return ctrl.Result{}, err
}

log = log.WithValues("cluster", cluster.Name)
Expand Down Expand Up @@ -127,7 +126,7 @@ func (r *ROSAClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.M
controller, err := ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(rosaCluster).
WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
Build(r)

if err != nil {
Expand All @@ -138,7 +137,7 @@ func (r *ROSAClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.M
if err = controller.Watch(
source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{},
handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("ROSACluster"), mgr.GetClient(), &expinfrav1.ROSACluster{})),
predicates.ClusterUnpaused(mgr.GetScheme(), log.GetLogger())),
predicates.ClusterPausedTransitions(mgr.GetScheme(), log.GetLogger())),
); err != nil {
return fmt.Errorf("failed adding a watch for ready clusters: %w", err)
}
Expand Down
16 changes: 16 additions & 0 deletions exp/api/v1beta2/rosacluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,10 @@ type ROSAClusterStatus struct {
// FailureDomains specifies a list of available availability zones that can be used
// +optional
FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`

// Conditions defines current service state of the ROSACluster.
// +optional
Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}

// +kubebuilder:object:root=true
Expand Down Expand Up @@ -66,6 +70,18 @@ type ROSAClusterList struct {
Items []ROSACluster `json:"items"`
}

// GetConditions reports the conditions currently recorded on the
// ROSACluster status, describing its observed operational state.
func (r *ROSACluster) GetConditions() clusterv1.Conditions {
	conditions := r.Status.Conditions
	return conditions
}

// SetConditions records the given clusterv1.Conditions on the
// ROSACluster status, replacing any previously stored conditions.
func (r *ROSACluster) SetConditions(conditions clusterv1.Conditions) {
	r.Status.Conditions = conditions
}

// init registers the ROSACluster kinds with the package scheme builder
// so they can be recognized by API machinery clients.
func init() {
	SchemeBuilder.Register(
		&ROSACluster{},
		&ROSAClusterList{},
	)
}
7 changes: 7 additions & 0 deletions exp/api/v1beta2/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading