ocimachine_controller.go
/*
Copyright (c) 2021, 2022 Oracle and/or its affiliates.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
infrastructurev1beta2 "github.com/oracle/cluster-api-provider-oci/api/v1beta2"
"github.com/oracle/cluster-api-provider-oci/cloud/ociutil"
"github.com/oracle/cluster-api-provider-oci/cloud/scope"
cloudutil "github.com/oracle/cluster-api-provider-oci/cloud/util"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/core"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/predicates"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// OCIMachineReconciler reconciles an OCIMachine object
type OCIMachineReconciler struct {
client.Client
Scheme *runtime.Scheme
Recorder record.EventRecorder
ClientProvider *scope.ClientProvider
Region string
}
//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=ocimachines,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=ocimachines/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=ocimachines/finalizers,verbs=update
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the machine closer to the desired state.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile
func (r *OCIMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, reterr error) {
logger := log.FromContext(ctx)
logger.Info("Got reconciliation event for machine")
ociMachine := &infrastructurev1beta2.OCIMachine{}
err := r.Get(ctx, req.NamespacedName, ociMachine)
if err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}
// Fetch the Machine.
machine, err := util.GetOwnerMachine(ctx, r.Client, ociMachine.ObjectMeta)
if err != nil {
return ctrl.Result{}, err
}
if machine == nil {
		r.Recorder.Eventf(ociMachine, corev1.EventTypeNormal, "OwnerRefNotSet", "Machine Controller has not yet set OwnerRef")
logger.Info("Machine Controller has not yet set OwnerRef")
return ctrl.Result{}, nil
}
logger = logger.WithValues("machine-name", ociMachine.Name)
// Fetch the Cluster.
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, ociMachine.ObjectMeta)
if err != nil {
r.Recorder.Eventf(ociMachine, corev1.EventTypeWarning, "ClusterDoesNotExist", "Machine is missing cluster label or cluster does not exist")
logger.Info("Machine is missing cluster label or cluster does not exist")
return ctrl.Result{}, nil
}
// Return early if the object or Cluster is paused.
if annotations.IsPaused(cluster, ociMachine) {
logger.Info("OCIMachine or linked Cluster is marked as paused. Won't reconcile")
return ctrl.Result{}, nil
}
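	// The Cluster's infrastructure reference determines whether this machine belongs to a
	// self-managed OCICluster or to an OCIManagedCluster; build the matching cluster accessor.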
ociCluster := &infrastructurev1beta2.OCICluster{}
ociClusterName := client.ObjectKey{
Namespace: cluster.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
var clusterAccessor scope.OCIClusterAccessor
if cluster.Spec.InfrastructureRef.Kind == "OCICluster" {
if err := r.Client.Get(ctx, ociClusterName, ociCluster); err != nil {
logger.Info("Cluster is not available yet")
r.Recorder.Eventf(ociMachine, corev1.EventTypeWarning, "ClusterNotAvailable", "Cluster is not available yet")
return ctrl.Result{}, nil
}
clusterAccessor = scope.OCISelfManagedCluster{
OCICluster: ociCluster,
}
} else if cluster.Spec.InfrastructureRef.Kind == "OCIManagedCluster" {
// check for oci managed cluster
ociManagedCluster := &infrastructurev1beta2.OCIManagedCluster{}
ociManagedClusterName := client.ObjectKey{
Namespace: cluster.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
		if err := r.Client.Get(ctx, ociManagedClusterName, ociManagedCluster); err != nil {
			logger.Info("OCIManagedCluster is not available yet")
			return ctrl.Result{}, nil
		}
clusterAccessor = scope.OCIManagedCluster{
OCIManagedCluster: ociManagedCluster,
}
} else {
r.Recorder.Eventf(ociMachine, corev1.EventTypeWarning, "InfrastructureClusterTypeNotSupported", fmt.Sprintf("Infrastructure Cluster Type %s is not supported", cluster.Spec.InfrastructureRef.Kind))
return ctrl.Result{}, errors.New(fmt.Sprintf("Infrastructure Cluster Type %s is not supported", cluster.Spec.InfrastructureRef.Kind))
}
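	// Initialize region-scoped OCI API clients for this cluster before building the machine scope.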
_, _, clients, err := cloudutil.InitClientsAndRegion(ctx, r.Client, r.Region, clusterAccessor, r.ClientProvider)
if err != nil {
return ctrl.Result{}, err
}
// Create the machine scope
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
Client: r.Client,
ComputeClient: clients.ComputeClient,
Logger: &logger,
Cluster: cluster,
OCIClusterAccessor: clusterAccessor,
Machine: machine,
OCIMachine: ociMachine,
VCNClient: clients.VCNClient,
NetworkLoadBalancerClient: clients.NetworkLoadBalancerClient,
LoadBalancerClient: clients.LoadBalancerClient,
})
if err != nil {
return ctrl.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
	// Always close the scope when exiting this function so we can persist any OCIMachine changes.
defer func() {
if err := machineScope.Close(ctx); err != nil && reterr == nil {
reterr = err
}
}()
// Handle deleted machines
if !ociMachine.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, machineScope)
}
// Handle non-deleted machines
return r.reconcileNormal(ctx, logger, machineScope)
}
// SetupWithManager sets up the controller with the Manager.
func (r *OCIMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
clusterToObjectFunc, err := util.ClusterToTypedObjectsMapper(r.Client, &infrastructurev1beta2.OCIMachineList{}, mgr.GetScheme())
if err != nil {
return errors.Wrapf(err, "failed to create mapper for Cluster to OCIMachines")
}
err = ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrastructurev1beta2.OCIMachine{}).
		// don't queue reconcile if resource is paused
		WithEventFilter(predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))).
Watches(
&clusterv1.Machine{},
handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrastructurev1beta2.
GroupVersion.WithKind(scope.OCIMachineKind))),
).
Watches(
&infrastructurev1beta2.OCICluster{},
handler.EnqueueRequestsFromMapFunc(r.OCIClusterToOCIMachines()),
).
Watches(
&infrastructurev1beta2.OCIManagedCluster{},
handler.EnqueueRequestsFromMapFunc(r.OCIManagedClusterToOCIMachines()),
).
Watches(
&clusterv1.Cluster{},
handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc),
builder.WithPredicates(
predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)),
),
).
Complete(r)
if err != nil {
return errors.Wrapf(err, "error creating controller")
}
return nil
}
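// OCIClusterToOCIMachines returns a handler.MapFunc that maps an OCICluster event to reconcile
// requests for every OCIMachine belonging to the owning Cluster.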
func (r *OCIMachineReconciler) OCIClusterToOCIMachines() handler.MapFunc {
return func(ctx context.Context, o client.Object) []ctrl.Request {
log := ctrl.LoggerFrom(ctx)
result := []ctrl.Request{}
c, ok := o.(*infrastructurev1beta2.OCICluster)
if !ok {
log.Error(errors.Errorf("expected a OCICluster but got a %T", o), "failed to get OCIMachine for OCICluster")
return nil
}
cluster, err := util.GetOwnerCluster(ctx, r.Client, c.ObjectMeta)
switch {
case apierrors.IsNotFound(err) || cluster == nil:
return result
case err != nil:
log.Error(err, "failed to get owning cluster")
return result
}
labels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name}
machineList := &clusterv1.MachineList{}
if err := r.List(ctx, machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
log.Error(err, "failed to list Machines")
return nil
}
for _, m := range machineList.Items {
if m.Spec.InfrastructureRef.Name == "" {
continue
}
name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}
result = append(result, ctrl.Request{NamespacedName: name})
}
return result
}
}
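// OCIManagedClusterToOCIMachines returns a handler.MapFunc that maps an OCIManagedCluster event to
// reconcile requests for every OCIMachine belonging to the owning Cluster.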
func (r *OCIMachineReconciler) OCIManagedClusterToOCIMachines() handler.MapFunc {
return func(ctx context.Context, o client.Object) []ctrl.Request {
log := ctrl.LoggerFrom(ctx)
result := []ctrl.Request{}
c, ok := o.(*infrastructurev1beta2.OCIManagedCluster)
if !ok {
log.Error(errors.Errorf("expected a OCICluster but got a %T", o), "failed to get OCIMachine for OCICluster")
return nil
}
cluster, err := util.GetOwnerCluster(ctx, r.Client, c.ObjectMeta)
switch {
case apierrors.IsNotFound(err) || cluster == nil:
return result
case err != nil:
log.Error(err, "failed to get owning cluster")
return result
}
labels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name}
machineList := &clusterv1.MachineList{}
if err := r.List(ctx, machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
log.Error(err, "failed to list Machines")
return nil
}
for _, m := range machineList.Items {
if m.Spec.InfrastructureRef.Name == "" {
continue
}
name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}
result = append(result, ctrl.Request{NamespacedName: name})
}
return result
}
}
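// reconcileNormal handles non-deleted machines: it ensures the OCI compute instance exists and
// updates the OCIMachine spec and status based on the instance lifecycle state.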
func (r *OCIMachineReconciler) reconcileNormal(ctx context.Context, logger logr.Logger, machineScope *scope.MachineScope) (ctrl.Result, error) {
controllerutil.AddFinalizer(machineScope.OCIMachine, infrastructurev1beta2.MachineFinalizer)
machine := machineScope.OCIMachine
infraMachine := machineScope.Machine
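	// If the DeleteMachineOnInstanceTermination annotation is set on the Machine, the Machine object
	// is deleted when the underlying OCI instance is terminated out of band (see the Terminated case below).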
annotations := infraMachine.GetAnnotations()
deleteMachineOnTermination := false
if annotations != nil {
_, deleteMachineOnTermination = annotations[infrastructurev1beta2.DeleteMachineOnInstanceTermination]
}
// Make sure bootstrap data is available and populated.
if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil {
r.Recorder.Event(machine, corev1.EventTypeNormal, infrastructurev1beta2.WaitingForBootstrapDataReason, "Bootstrap data secret reference is not yet available")
conditions.MarkFalse(machine, infrastructurev1beta2.InstanceReadyCondition, infrastructurev1beta2.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
logger.Info("Bootstrap data secret reference is not yet available")
return ctrl.Result{}, nil
}
instance, err := r.getOrCreate(ctx, machineScope)
if err != nil {
r.Recorder.Event(machine, corev1.EventTypeWarning, "ReconcileError", errors.Wrapf(err, "Failed to reconcile OCIMachine").Error())
conditions.MarkFalse(machine, infrastructurev1beta2.InstanceReadyCondition, infrastructurev1beta2.InstanceProvisionFailedReason, clusterv1.ConditionSeverityError, "")
return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile OCI Machine %s/%s", machineScope.OCIMachine.Namespace, machineScope.OCIMachine.Name)
}
machineScope.Info("OCI Compute Instance found", "InstanceID", *instance.Id)
machine.Spec.InstanceId = instance.Id
machine.Spec.ProviderID = common.String(machineScope.OCIClusterAccessor.GetProviderID(*instance.Id))
	// Proceed to reconcile the OCIMachine state based on the instance lifecycle state.
switch instance.LifecycleState {
case core.InstanceLifecycleStateProvisioning, core.InstanceLifecycleStateStarting:
machineScope.Info("Instance is pending")
conditions.MarkFalse(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition, infrastructurev1beta2.InstanceNotReadyReason, clusterv1.ConditionSeverityInfo, "")
return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
case core.InstanceLifecycleStateStopping, core.InstanceLifecycleStateStopped, core.InstanceLifecycleStateMoving:
machineScope.SetNotReady()
machineScope.Info(fmt.Sprintf("Instance is in %s state and not ready", instance.LifecycleState))
conditions.MarkFalse(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition, infrastructurev1beta2.InstanceNotReadyReason, clusterv1.ConditionSeverityInfo, "")
return reconcile.Result{}, nil
case core.InstanceLifecycleStateRunning:
machineScope.Info("Instance is active")
		if len(machine.Status.Addresses) == 0 {
machineScope.Info("IP address is not set on the instance, looking up the address")
ipAddresses, err := machineScope.GetInstanceIPs(ctx)
if err != nil {
r.Recorder.Event(machine, corev1.EventTypeWarning, "ReconcileError", errors.Wrapf(err, "failed to reconcile OCIMachine").Error())
conditions.MarkFalse(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition, infrastructurev1beta2.InstanceIPAddressNotFound, clusterv1.ConditionSeverityError, "")
return ctrl.Result{}, err
}
machine.Status.Addresses = ipAddresses
}
if machineScope.IsControlPlane() {
err := machineScope.ReconcileCreateInstanceOnLB(ctx)
if err != nil {
r.Recorder.Event(machine, corev1.EventTypeWarning, "ReconcileError", errors.Wrapf(err, "failed to reconcile OCIMachine").Error())
conditions.MarkFalse(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition, infrastructurev1beta2.InstanceLBBackendAdditionFailedReason, clusterv1.ConditionSeverityError, "")
return ctrl.Result{}, err
}
machineScope.Info("Instance is added to the control plane LB")
}
if len(machine.Spec.VnicAttachments) > 0 {
err := machineScope.ReconcileVnicAttachments(ctx)
if err != nil {
r.Recorder.Event(machine, corev1.EventTypeWarning, "ReconcileError", errors.Wrapf(err, "failed to reconcile OCIMachine").Error())
conditions.MarkFalse(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition,
infrastructurev1beta2.InstanceVnicAttachmentFailedReason, clusterv1.ConditionSeverityError, "")
return ctrl.Result{}, err
}
machineScope.Info("Instance vnic attachment success")
r.Recorder.Eventf(machineScope.OCIMachine, corev1.EventTypeNormal, infrastructurev1beta2.InstanceVnicAttachmentReady,
"VNICs have been attached to instance.")
}
		// mark the instance ready and record an event
r.Recorder.Eventf(machine, corev1.EventTypeNormal, "InstanceReady",
"Instance is in ready state")
conditions.MarkTrue(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition)
machineScope.SetReady()
if deleteMachineOnTermination {
			// Typically, instance termination generates machine events, so the 300 second requeue should
			// not be needed; it is kept as a safety net in case such an event is missed.
return reconcile.Result{RequeueAfter: 300 * time.Second}, nil
} else {
return reconcile.Result{}, nil
}
case core.InstanceLifecycleStateTerminated:
if deleteMachineOnTermination && infraMachine.DeletionTimestamp == nil {
logger.Info("Deleting underlying machine as instance is terminated")
if err := machineScope.Client.Delete(ctx, infraMachine); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to delete machine %s/%s", infraMachine.Namespace, infraMachine.Name)
}
}
fallthrough
default:
machineScope.SetNotReady()
conditions.MarkFalse(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition, infrastructurev1beta2.InstanceProvisionFailedReason, clusterv1.ConditionSeverityError, "")
machineScope.SetFailureReason(capierrors.CreateMachineError)
machineScope.SetFailureMessage(errors.Errorf("Instance status %q is unexpected", instance.LifecycleState))
r.Recorder.Eventf(machine, corev1.EventTypeWarning, "ReconcileError",
"Instance has invalid lifecycle state %s", instance.LifecycleState)
		return reconcile.Result{}, errors.Errorf("instance has invalid lifecycle state %s", instance.LifecycleState)
}
}
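// getOrCreate returns the existing OCI compute instance for the machine, creating it if it does not exist.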
func (r *OCIMachineReconciler) getOrCreate(ctx context.Context, scope *scope.MachineScope) (*core.Instance, error) {
instance, err := scope.GetOrCreateMachine(ctx)
return instance, err
}
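// reconcileDelete handles deleted OCIMachines: it removes the instance from the control plane load
// balancer if applicable, terminates the instance, and removes the finalizer once the instance is gone.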
func (r *OCIMachineReconciler) reconcileDelete(ctx context.Context, machineScope *scope.MachineScope) (_ ctrl.Result, reterr error) {
machineScope.Info("Handling deleted OCIMachine")
instance, err := machineScope.GetMachine(ctx)
if err != nil {
if ociutil.IsNotFound(err) {
err := r.deleteInstanceFromControlPlaneLB(ctx, machineScope)
if err != nil {
return reconcile.Result{}, err
}
conditions.MarkFalse(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition, infrastructurev1beta2.InstanceNotFoundReason, clusterv1.ConditionSeverityInfo, "")
machineScope.Info("Instance is not found, may have been deleted")
controllerutil.RemoveFinalizer(machineScope.OCIMachine, infrastructurev1beta2.MachineFinalizer)
return reconcile.Result{}, nil
} else {
return reconcile.Result{}, err
}
}
if instance == nil {
machineScope.Info("Instance is not found, may have been deleted")
controllerutil.RemoveFinalizer(machineScope.OCIMachine, infrastructurev1beta2.MachineFinalizer)
return reconcile.Result{}, nil
}
machineScope.Info("OCI Compute Instance found", "InstanceID", *instance.Id)
switch instance.LifecycleState {
case core.InstanceLifecycleStateTerminating:
machineScope.Info("Instance is terminating")
return reconcile.Result{RequeueAfter: 30 * time.Second}, nil
case core.InstanceLifecycleStateTerminated:
conditions.MarkFalse(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition, infrastructurev1beta2.InstanceTerminatedReason, clusterv1.ConditionSeverityInfo, "")
controllerutil.RemoveFinalizer(machineScope.OCIMachine, infrastructurev1beta2.MachineFinalizer)
machineScope.Info("Instance is deleted")
r.Recorder.Eventf(machineScope.OCIMachine, corev1.EventTypeNormal,
"InstanceTerminated", "Deleted the instance")
return reconcile.Result{}, nil
default:
if !machineScope.IsResourceCreatedByClusterAPI(instance.FreeformTags) {
return reconcile.Result{}, errors.New("instance is not created by current cluster")
}
err := r.deleteInstanceFromControlPlaneLB(ctx, machineScope)
if err != nil {
return reconcile.Result{}, err
}
if err := machineScope.DeleteMachine(ctx, instance); err != nil {
machineScope.Error(err, "Error deleting Instance")
return ctrl.Result{}, errors.Wrapf(err, "error deleting instance %s", machineScope.Name())
}
conditions.MarkFalse(machineScope.OCIMachine, infrastructurev1beta2.InstanceReadyCondition, infrastructurev1beta2.InstanceTerminatingReason, clusterv1.ConditionSeverityInfo, "")
r.Recorder.Eventf(machineScope.OCIMachine, corev1.EventTypeNormal,
"InstanceTerminating", "Terminating the instance")
return reconcile.Result{RequeueAfter: 30 * time.Second}, nil
}
}
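// deleteInstanceFromControlPlaneLB removes a control plane machine's instance from the control plane
// load balancer backend; it is a no-op for worker machines.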
func (r *OCIMachineReconciler) deleteInstanceFromControlPlaneLB(ctx context.Context, machineScope *scope.MachineScope) error {
if machineScope.IsControlPlane() {
err := machineScope.ReconcileDeleteInstanceOnLB(ctx)
if err != nil {
return err
}
machineScope.Info("Instance is removed from the control plane LB")
r.Recorder.Eventf(machineScope.OCIMachine, corev1.EventTypeNormal, "OCIMachineRemovedFromLB",
"Instance has been removed from the control plane LB")
}
return nil
}