Skip to content

Commit 78b6874

Browse files
committed
Improved management of status for IndexerCluster and SearchHeadCluster
Added doc example on using a Horizontal Pod Autoscaler
1 parent 9a760bb commit 78b6874

File tree

7 files changed

+187
-48
lines changed

7 files changed

+187
-48
lines changed

docs/Examples.md

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,32 @@ $ kubectl scale idc example --replicas=5
107107
indexercluster.enterprise.splunk.com/example scaled
108108
```
109109

110+
You can also create [Horizontal Pod Autoscalers](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/)
111+
to manage scaling for you. For example:
112+
113+
```yaml
114+
cat <<EOF | kubectl apply -f -
115+
apiVersion: autoscaling/v1
116+
kind: HorizontalPodAutoscaler
117+
metadata:
118+
name: idc-example
119+
spec:
120+
scaleTargetRef:
121+
apiVersion: enterprise.splunk.com/v1alpha2
122+
kind: IndexerCluster
123+
name: example
124+
minReplicas: 5
125+
maxReplicas: 10
126+
targetCPUUtilizationPercentage: 50
127+
EOF
128+
```
129+
130+
```
131+
$ kubectl get hpa
132+
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
133+
idc-example IndexerCluster/example 16%/50% 5 10 5 15m
134+
```
135+
110136
To create a standalone search head that uses your indexer cluster, all you
111137
have to do is add an `indexerClusterRef` parameter:
112138

pkg/splunk/reconcile/indexercluster.go

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,14 @@ func ApplyIndexerCluster(client ControllerClient, cr *enterprisev1.IndexerCluste
5151
cr.Status.ClusterMasterPhase = enterprisev1.PhaseError
5252
cr.Status.Replicas = cr.Spec.Replicas
5353
cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetIdentifier())
54+
if cr.Status.Peers == nil {
55+
cr.Status.Peers = []enterprisev1.IndexerClusterMemberStatus{}
56+
}
5457
defer func() {
55-
client.Status().Update(context.TODO(), cr)
58+
err = client.Status().Update(context.TODO(), cr)
59+
if err != nil {
60+
scopedLog.Error(err, "Status update failed")
61+
}
5662
}()
5763

5864
// check if deletion has been requested
@@ -227,7 +233,6 @@ func (mgr *IndexerClusterPodManager) getClusterMasterClient() *splclient.SplunkC
227233
// updateStatus for IndexerClusterPodManager uses the REST API to update the status for an IndexerCluster custom resource
228234
func (mgr *IndexerClusterPodManager) updateStatus(statefulSet *appsv1.StatefulSet) error {
229235
mgr.cr.Status.ReadyReplicas = statefulSet.Status.ReadyReplicas
230-
mgr.cr.Status.Peers = []enterprisev1.IndexerClusterMemberStatus{}
231236

232237
if mgr.cr.Status.ClusterMasterPhase != enterprisev1.PhaseReady {
233238
mgr.cr.Status.Initialized = false
@@ -266,7 +271,16 @@ func (mgr *IndexerClusterPodManager) updateStatus(statefulSet *appsv1.StatefulSe
266271
} else {
267272
mgr.log.Info("Peer is not known by cluster master", "peerName", peerName)
268273
}
269-
mgr.cr.Status.Peers = append(mgr.cr.Status.Peers, peerStatus)
274+
if n < int32(len(mgr.cr.Status.Peers)) {
275+
mgr.cr.Status.Peers[n] = peerStatus
276+
} else {
277+
mgr.cr.Status.Peers = append(mgr.cr.Status.Peers, peerStatus)
278+
}
279+
}
280+
281+
// truncate any extra peers that we didn't check (leftover from scale down)
282+
if statefulSet.Status.Replicas < int32(len(mgr.cr.Status.Peers)) {
283+
mgr.cr.Status.Peers = mgr.cr.Status.Peers[:statefulSet.Status.Replicas]
270284
}
271285

272286
return nil

pkg/splunk/reconcile/indexercluster_test.go

Lines changed: 34 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -137,10 +137,20 @@ func TestIndexerClusterPodManager(t *testing.T) {
137137

138138
// test 1 ready pod
139139
mockHandlers := []spltest.MockHTTPHandler{
140-
{"GET", "https://splunk-stack1-cluster-master-service.test.svc.cluster.local:8089/services/cluster/master/info?count=0&output_mode=json", 200, nil,
141-
`{"links":{},"origin":"https://localhost:8089/services/cluster/master/info","updated":"2020-03-18T01:04:53+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"master","id":"https://localhost:8089/services/cluster/master/info/master","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/cluster/master/info/master","list":"/services/cluster/master/info/master"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7337-1583870198.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","timestamp":1583870198},"apply_bundle_status":{"invalid_bundle":{"bundle_path":"","bundle_validation_errors_on_master":[],"checksum":"","timestamp":0},"reload_bundle_issued":false,"status":"None"},"backup_and_restore_primaries":false,"controlled_rolling_restart_flag":false,"eai:acl":null,"indexing_ready_flag":true,"initialized_flag":true,"label":"splunk-stack1-cluster-master-0","last_check_restart_bundle_result":false,"last_dry_run_bundle":{"bundle_path":"","checksum":"","timestamp":0},"last_validated_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/0af7c0e95f313f7be3b0cb1d878df9a1-1583948640.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","is_valid_bundle":true,"timestamp":1583948640},"latest_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7337-1583870198.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","timestamp":1583870198},"maintenance_mode":false,"multisite":false,"previous_active_bundle":{"bundle_path":"","checksum":"","timestamp":0},"primaries_backup_status":"No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.","quiet_period_flag":false,"rolling_restart_flag":false,"rolling_restart_or_upgrade":false,"service_ready_flag":true,"start_time":1583948636,"summary_replication":"false"}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}`},
142-
{"GET", "https://splunk-stack1-cluster-master-service.test.svc.cluster.local:8089/services/cluster/master/peers?count=0&output_mode=json", 200, nil,
143-
`{"links":{"create":"/services/cluster/master/peers/_new"},"origin":"https://localhost:8089/services/cluster/master/peers","updated":"2020-03-18T01:08:53+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"D39B1729-E2C5-4273-B9B2-534DA7C2F866","id":"https://localhost:8089/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","list":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","edit":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_bundle_id":"14310A4AABD23E85BBD4559C4A3B59F8","apply_bundle_status":{"invalid_bundle":{"bundle_validation_errors":[],"invalid_bundle_id":""},"reasons_for_restart":[],"restart_required_for_apply_bundle":false,"status":"None"},"base_generation_id":26,"bucket_count":73,"bucket_count_by_index":{"_audit":24,"_internal":45,"_telemetry":4},"buckets_rf_by_origin_site":{"default":73},"buckets_sf_by_origin_site":{"default":73},"delayed_buckets_to_discard":[],"eai:acl":null,"fixup_set":[],"heartbeat_started":true,"host_port_pair":"10.36.0.6:8089","indexing_disk_space":210707374080,"is_searchable":true,"is_valid_bundle":true,"label":"splunk-stack1-indexer-0","last_dry_run_bundle":"","last_heartbeat":1584493732,"last_validated_bundle":"14310A4AABD23E85BBD4559C4A3B59F8","latest_bundle_id":"14310A4AABD23E85BBD4559C4A3B59F8","peer_registered_summaries":true,"pending_builds":[],"pending_job_count":0,"primary_count":73,"primary_count_remote":0,"register_search_address":"10.36.0.6:8089","replication_count":0,"replication_port":9887,"replication_use_ssl":false,"restart_required_for_applying_dry_run_bund
le":false,"search_state_counter":{"PendingSearchable":0,"Searchable":73,"SearchablePendingMask":0,"Unsearchable":0},"site":"default","splunk_version":"8.0.2","status":"Up","status_counter":{"Complete":69,"NonStreamingTarget":0,"StreamingSource":4,"StreamingTarget":0},"summary_replication_count":0}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}`},
140+
{
141+
Method: "GET",
142+
URL: "https://splunk-stack1-cluster-master-service.test.svc.cluster.local:8089/services/cluster/master/info?count=0&output_mode=json",
143+
Status: 200,
144+
Err: nil,
145+
Body: `{"links":{},"origin":"https://localhost:8089/services/cluster/master/info","updated":"2020-03-18T01:04:53+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"master","id":"https://localhost:8089/services/cluster/master/info/master","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/cluster/master/info/master","list":"/services/cluster/master/info/master"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7337-1583870198.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","timestamp":1583870198},"apply_bundle_status":{"invalid_bundle":{"bundle_path":"","bundle_validation_errors_on_master":[],"checksum":"","timestamp":0},"reload_bundle_issued":false,"status":"None"},"backup_and_restore_primaries":false,"controlled_rolling_restart_flag":false,"eai:acl":null,"indexing_ready_flag":true,"initialized_flag":true,"label":"splunk-stack1-cluster-master-0","last_check_restart_bundle_result":false,"last_dry_run_bundle":{"bundle_path":"","checksum":"","timestamp":0},"last_validated_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/0af7c0e95f313f7be3b0cb1d878df9a1-1583948640.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","is_valid_bundle":true,"timestamp":1583948640},"latest_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7337-1583870198.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","timestamp":1583870198},"maintenance_mode":false,"multisite":false,"previous_active_bundle":{"bundle_path":"","checksum":"","timestamp":0},"primaries_backup_status":"No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.","quiet_period_flag":false,"rolling_restart_flag":false,"rolling_restart_or_upgrade":false,"service_ready_flag":true,"start_time":1583948636,"summary_replication":"false"}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}`,
146+
},
147+
{
148+
Method: "GET",
149+
URL: "https://splunk-stack1-cluster-master-service.test.svc.cluster.local:8089/services/cluster/master/peers?count=0&output_mode=json",
150+
Status: 200,
151+
Err: nil,
152+
Body: `{"links":{"create":"/services/cluster/master/peers/_new"},"origin":"https://localhost:8089/services/cluster/master/peers","updated":"2020-03-18T01:08:53+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"D39B1729-E2C5-4273-B9B2-534DA7C2F866","id":"https://localhost:8089/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","list":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","edit":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_bundle_id":"14310A4AABD23E85BBD4559C4A3B59F8","apply_bundle_status":{"invalid_bundle":{"bundle_validation_errors":[],"invalid_bundle_id":""},"reasons_for_restart":[],"restart_required_for_apply_bundle":false,"status":"None"},"base_generation_id":26,"bucket_count":73,"bucket_count_by_index":{"_audit":24,"_internal":45,"_telemetry":4},"buckets_rf_by_origin_site":{"default":73},"buckets_sf_by_origin_site":{"default":73},"delayed_buckets_to_discard":[],"eai:acl":null,"fixup_set":[],"heartbeat_started":true,"host_port_pair":"10.36.0.6:8089","indexing_disk_space":210707374080,"is_searchable":true,"is_valid_bundle":true,"label":"splunk-stack1-indexer-0","last_dry_run_bundle":"","last_heartbeat":1584493732,"last_validated_bundle":"14310A4AABD23E85BBD4559C4A3B59F8","latest_bundle_id":"14310A4AABD23E85BBD4559C4A3B59F8","peer_registered_summaries":true,"pending_builds":[],"pending_job_count":0,"primary_count":73,"primary_count_remote":0,"register_search_address":"10.36.0.6:8089","replication_count":0,"replication_port":9887,"replication_use_ssl":false,"restart_required_for_applying_dry_ru
n_bundle":false,"search_state_counter":{"PendingSearchable":0,"Searchable":73,"SearchablePendingMask":0,"Unsearchable":0},"site":"default","splunk_version":"8.0.2","status":"Up","status_counter":{"Complete":69,"NonStreamingTarget":0,"StreamingSource":4,"StreamingTarget":0},"summary_replication_count":0}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}`,
153+
},
144154
}
145155
wantCalls = map[string][]mockFuncCall{"Get": funcCalls}
146156
pod := &corev1.Pod{
@@ -151,14 +161,24 @@ func TestIndexerClusterPodManager(t *testing.T) {
151161
"controller-revision-hash": "v1",
152162
},
153163
},
164+
Status: corev1.PodStatus{
165+
Phase: corev1.PodRunning,
166+
ContainerStatuses: []corev1.ContainerStatus{
167+
{Ready: true},
168+
},
169+
},
154170
}
155171
method := "IndexerClusterPodManager.Update(All pods ready)"
156172
indexerClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseReady, statefulSet, wantCalls, nil, statefulSet, pod)
157173

158174
// test pod needs update => decommission
159-
mockHandlers = append(mockHandlers,
160-
spltest.MockHTTPHandler{"POST", "https://splunk-stack1-indexer-0.splunk-stack1-indexer-headless.test.svc.cluster.local:8089/services/cluster/slave/control/control/decommission?enforce_counts=0", 200, nil, ``},
161-
)
175+
mockHandlers = append(mockHandlers, spltest.MockHTTPHandler{
176+
Method: "POST",
177+
URL: "https://splunk-stack1-indexer-0.splunk-stack1-indexer-headless.test.svc.cluster.local:8089/services/cluster/slave/control/control/decommission?enforce_counts=0",
178+
Status: 200,
179+
Err: nil,
180+
Body: ``,
181+
})
162182
pod.ObjectMeta.Labels["controller-revision-hash"] = "v0"
163183
method = "IndexerClusterPodManager.Update(Decommission Pod)"
164184
indexerClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseUpdating, statefulSet, wantCalls, nil, statefulSet, pod)
@@ -192,9 +212,13 @@ func TestIndexerClusterPodManager(t *testing.T) {
192212

193213
// test scale down => decommission pod
194214
mockHandlers[1].Body = `{"entry":[{"name":"aa45bf46-7f46-47af-a760-590d5c606d10","content":{"status":"Up","label":"splunk-stack1-indexer-0"}},{"name":"D39B1729-E2C5-4273-B9B2-534DA7C2F866","content":{"status":"GracefulShutdown","label":"splunk-stack1-indexer-1"}}]}`
195-
mockHandlers = append(mockHandlers,
196-
spltest.MockHTTPHandler{"POST", "https://splunk-stack1-cluster-master-service.test.svc.cluster.local:8089/services/cluster/master/control/control/remove_peers?peers=D39B1729-E2C5-4273-B9B2-534DA7C2F866", 200, nil, ``},
197-
)
215+
mockHandlers = append(mockHandlers, spltest.MockHTTPHandler{
216+
Method: "POST",
217+
URL: "https://splunk-stack1-cluster-master-service.test.svc.cluster.local:8089/services/cluster/master/control/control/remove_peers?peers=D39B1729-E2C5-4273-B9B2-534DA7C2F866",
218+
Status: 200,
219+
Err: nil,
220+
Body: ``,
221+
})
198222
pvcCalls := []mockFuncCall{
199223
{metaName: "*v1.PersistentVolumeClaim-test-pvc-etc-splunk-stack1-1"},
200224
{metaName: "*v1.PersistentVolumeClaim-test-pvc-var-splunk-stack1-1"},

pkg/splunk/reconcile/searchheadcluster.go

Lines changed: 41 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,14 @@ func ApplySearchHeadCluster(client ControllerClient, cr *enterprisev1.SearchHead
5050
cr.Status.DeployerPhase = enterprisev1.PhaseError
5151
cr.Status.Replicas = cr.Spec.Replicas
5252
cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-search-head", cr.GetIdentifier())
53+
if cr.Status.Members == nil {
54+
cr.Status.Members = []enterprisev1.SearchHeadClusterMemberStatus{}
55+
}
5356
defer func() {
54-
client.Status().Update(context.TODO(), cr)
57+
err = client.Status().Update(context.TODO(), cr)
58+
if err != nil {
59+
scopedLog.Error(err, "Status update failed")
60+
}
5561
}()
5662

5763
// check if deletion has been requested
@@ -189,6 +195,10 @@ func (mgr *SearchHeadClusterPodManager) PrepareRecycle(n int32) (bool, error) {
189195
mgr.log.Info("Waiting for active searches to complete", "memberName", memberName)
190196
}
191197
return searchesComplete, nil
198+
199+
case "": // this can happen after the member has already been recycled and we're just waiting for state to update
200+
mgr.log.Info("Member has empty Status", "memberName", memberName)
201+
return false, nil
192202
}
193203

194204
// unhandled status
@@ -226,13 +236,14 @@ func (mgr *SearchHeadClusterPodManager) getClient(n int32) *splclient.SplunkClie
226236
// updateStatus for SearchHeadClusterPodManager uses the REST API to update the status for a SearchHead custom resource
227237
func (mgr *SearchHeadClusterPodManager) updateStatus(statefulSet *appsv1.StatefulSet) error {
228238
// populate members status using REST API to get search head cluster member info
239+
mgr.cr.Status.Captain = ""
240+
mgr.cr.Status.CaptainReady = false
229241
mgr.cr.Status.ReadyReplicas = statefulSet.Status.ReadyReplicas
230242
if mgr.cr.Status.ReadyReplicas == 0 {
231243
return nil
232244
}
233245
gotCaptainInfo := false
234-
mgr.cr.Status.Members = []enterprisev1.SearchHeadClusterMemberStatus{}
235-
for n := int32(0); n < mgr.cr.Status.ReadyReplicas; n++ {
246+
for n := int32(0); n < statefulSet.Status.Replicas; n++ {
236247
c := mgr.getClient(n)
237248
memberName := enterprise.GetSplunkStatefulsetPodName(enterprise.SplunkSearchHead, mgr.cr.GetIdentifier(), n)
238249
memberStatus := enterprisev1.SearchHeadClusterMemberStatus{Name: memberName}
@@ -243,24 +254,35 @@ func (mgr *SearchHeadClusterPodManager) updateStatus(statefulSet *appsv1.Statefu
243254
memberStatus.Registered = memberInfo.Registered
244255
memberStatus.ActiveHistoricalSearchCount = memberInfo.ActiveHistoricalSearchCount
245256
memberStatus.ActiveRealtimeSearchCount = memberInfo.ActiveRealtimeSearchCount
246-
if !gotCaptainInfo {
247-
// try querying captain api; note that this should work on any node
248-
captainInfo, err := c.GetSearchHeadCaptainInfo()
249-
if err == nil {
250-
mgr.cr.Status.Captain = captainInfo.Label
251-
mgr.cr.Status.CaptainReady = captainInfo.ServiceReady
252-
mgr.cr.Status.Initialized = captainInfo.Initialized
253-
mgr.cr.Status.MinPeersJoined = captainInfo.MinPeersJoined
254-
mgr.cr.Status.MaintenanceMode = captainInfo.MaintenanceMode
255-
gotCaptainInfo = true
256-
}
257-
}
258-
} else if n < statefulSet.Status.Replicas {
259-
// ignore error if pod was just terminated for scale down event (n >= Replicas)
257+
} else {
260258
mgr.log.Error(err, "Unable to retrieve search head cluster member info", "memberName", memberName)
261-
return err
262259
}
263-
mgr.cr.Status.Members = append(mgr.cr.Status.Members, memberStatus)
260+
261+
if err == nil && !gotCaptainInfo {
262+
// try querying captain api; note that this should work on any node
263+
captainInfo, err := c.GetSearchHeadCaptainInfo()
264+
if err == nil {
265+
mgr.cr.Status.Captain = captainInfo.Label
266+
mgr.cr.Status.CaptainReady = captainInfo.ServiceReady
267+
mgr.cr.Status.Initialized = captainInfo.Initialized
268+
mgr.cr.Status.MinPeersJoined = captainInfo.MinPeersJoined
269+
mgr.cr.Status.MaintenanceMode = captainInfo.MaintenanceMode
270+
gotCaptainInfo = true
271+
} else {
272+
mgr.log.Error(err, "Unable to retrieve captain info", "memberName", memberName)
273+
}
274+
}
275+
276+
if n < int32(len(mgr.cr.Status.Members)) {
277+
mgr.cr.Status.Members[n] = memberStatus
278+
} else {
279+
mgr.cr.Status.Members = append(mgr.cr.Status.Members, memberStatus)
280+
}
281+
}
282+
283+
// truncate any extra members that we didn't check (leftover from scale down)
284+
if statefulSet.Status.Replicas < int32(len(mgr.cr.Status.Members)) {
285+
mgr.cr.Status.Members = mgr.cr.Status.Members[:statefulSet.Status.Replicas]
264286
}
265287

266288
return nil

0 commit comments

Comments
 (0)