From a38c1f1993a8359ba73fd1257c68ba558bacf90e Mon Sep 17 00:00:00 2001
From: Yuichiro Ueno
Date: Sat, 23 Oct 2021 20:29:41 +0900
Subject: [PATCH] ceph: add context parameter to opcontroller

This commit adds a context parameter to the utilities in opcontroller,
removing the use of context.TODO() there. With the caller's context
threaded through, cancellation of reconcilers can be handled in a
fine-grained way. (A short usage sketch follows the diff.)

Part of #8701.

Signed-off-by: Yuichiro Ueno
---
 pkg/operator/ceph/client/controller.go            |  8 ++++----
 pkg/operator/ceph/cluster/cluster.go              | 16 ++++++++--------
 pkg/operator/ceph/cluster/cluster_external.go     |  2 +-
 pkg/operator/ceph/cluster/controller.go           | 12 ++++++------
 pkg/operator/ceph/cluster/osd/create.go           |  4 ++--
 .../ceph/cluster/osd/integration_test.go          |  2 +-
 pkg/operator/ceph/cluster/osd/osd_test.go         |  2 +-
 pkg/operator/ceph/cluster/osd/update.go           |  4 ++--
 pkg/operator/ceph/cluster/osd/update_test.go      |  2 +-
 pkg/operator/ceph/cluster/rbd/controller.go       |  2 +-
 pkg/operator/ceph/controller/conditions.go        |  4 ++--
 pkg/operator/ceph/controller/controller_utils.go  |  4 ++--
 pkg/operator/ceph/controller/finalizer.go         | 14 +++++++-------
 pkg/operator/ceph/controller/finalizer_test.go    |  7 ++++---
 pkg/operator/ceph/controller/handler.go           |  4 ++--
 pkg/operator/ceph/controller/handler_test.go      |  3 ++-
 .../ceph/controller/object_operations.go          |  6 +++---
 pkg/operator/ceph/file/controller.go              | 14 +++++++-------
 pkg/operator/ceph/file/mirror/controller.go       |  2 +-
 pkg/operator/ceph/nfs/controller.go               |  8 ++++----
 pkg/operator/ceph/object/controller.go            |  8 ++++----
 pkg/operator/ceph/object/realm/controller.go      |  2 +-
 pkg/operator/ceph/object/user/controller.go       | 12 ++++++------
 pkg/operator/ceph/object/zone/controller.go       |  2 +-
 pkg/operator/ceph/object/zonegroup/controller.go  |  2 +-
 pkg/operator/ceph/pool/controller.go              | 14 +++++++-------
 26 files changed, 81 insertions(+), 79 deletions(-)

diff --git a/pkg/operator/ceph/client/controller.go b/pkg/operator/ceph/client/controller.go
index 131d574dd1caa..bd36e33c86d9a 100644
--- a/pkg/operator/ceph/client/controller.go
+++ b/pkg/operator/ceph/client/controller.go
@@ -141,7 +141,7 @@ func (r *ReconcileCephClient) reconcile(request reconcile.Request) (reconcile.Re
 	}
 
 	// Set a finalizer so we can do cleanup before the object goes away
-	err = opcontroller.AddFinalizerIfNotPresent(r.client, cephClient)
+	err = opcontroller.AddFinalizerIfNotPresent(r.opManagerContext, r.client, cephClient)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrap(err, "failed to add finalizer")
 	}
@@ -152,7 +152,7 @@ func (r *ReconcileCephClient) reconcile(request reconcile.Request) (reconcile.Re
 	}
 
 	// Make sure a CephCluster is present otherwise do nothing
-	_, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
+	_, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, r.context, request.NamespacedName, controllerName)
 	if !isReadyToReconcile {
 		// This handles the case where the Ceph Cluster is gone and we want to delete that CR
 		// We skip the deletePool() function since everything is gone already
@@ -162,7 +162,7 @@
 	// This handles the case where the operator is not ready to accept Ceph command but the cluster exists
 	if !cephClient.GetDeletionTimestamp().IsZero() && !cephClusterExists {
 		// Remove finalizer
-		err = opcontroller.RemoveFinalizer(r.client, cephClient)
+		err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephClient)
 		if err != nil {
 			return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to remove finalizer")
 		}
@@ -189,7 +189,7 @@ func (r *ReconcileCephClient) reconcile(request reconcile.Request) (reconcile.Re
 	}
 
 	// Remove finalizer
-	err = opcontroller.RemoveFinalizer(r.client, cephClient)
+	err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephClient)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
 	}
diff --git a/pkg/operator/ceph/cluster/cluster.go b/pkg/operator/ceph/cluster/cluster.go
index c6482b19f96aa..7ef5de4d3ecd3 100755
--- a/pkg/operator/ceph/cluster/cluster.go
+++ b/pkg/operator/ceph/cluster/cluster.go
@@ -92,7 +92,7 @@ func (c *cluster) reconcileCephDaemons(rookImage string, cephVersion cephver.Cep
 	}
 
 	// Start the mon pods
-	controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph Mons")
+	controller.UpdateCondition(c.ClusterInfo.Context, c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph Mons")
 	clusterInfo, err := c.mons.Start(c.ClusterInfo, rookImage, cephVersion, *c.Spec)
 	if err != nil {
 		return errors.Wrap(err, "failed to start ceph monitors")
@@ -120,7 +120,7 @@ func (c *cluster) reconcileCephDaemons(rookImage string, cephVersion cephver.Cep
 	}
 
 	// Start Ceph manager
-	controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph Mgr(s)")
+	controller.UpdateCondition(c.ClusterInfo.Context, c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph Mgr(s)")
 	mgrs := mgr.New(c.context, c.ClusterInfo, *c.Spec, rookImage)
 	err = mgrs.Start()
 	if err != nil {
@@ -128,7 +128,7 @@ func (c *cluster) reconcileCephDaemons(rookImage string, cephVersion cephver.Cep
 	}
 
 	// Start the OSDs
-	controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph OSDs")
+	controller.UpdateCondition(c.ClusterInfo.Context, c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring Ceph OSDs")
 	osds := osd.New(c.context, c.ClusterInfo, *c.Spec, rookImage)
 	err = osds.Start()
 	if err != nil {
@@ -179,7 +179,7 @@ func (c *ClusterController) initializeCluster(cluster *cluster) error {
 	if cluster.Spec.External.Enable {
 		err := c.configureExternalCephCluster(cluster)
 		if err != nil {
-			controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionFalse, cephv1.ClusterProgressingReason, err.Error())
+			controller.UpdateCondition(c.OpManagerCtx, c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionFalse, cephv1.ClusterProgressingReason, err.Error())
 			return errors.Wrap(err, "failed to configure external ceph cluster")
 		}
 	} else {
@@ -194,7 +194,7 @@ func (c *ClusterController) initializeCluster(cluster *cluster) error {
 
 		err = c.configureLocalCephCluster(cluster)
 		if err != nil {
-			controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionFalse, cephv1.ClusterProgressingReason, err.Error())
+			controller.UpdateCondition(c.OpManagerCtx, c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionFalse, cephv1.ClusterProgressingReason, err.Error())
 			return errors.Wrap(err, "failed to configure local ceph cluster")
 		}
 	}
@@ -216,7 +216,7 @@ func (c *ClusterController) configureLocalCephCluster(cluster *cluster) error {
 	}
 
 	// Run image validation job
-	controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Detecting Ceph version")
+	controller.UpdateCondition(c.OpManagerCtx, c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Detecting Ceph version")
 	cephVersion, isUpgrade, err := c.detectAndValidateCephVersion(cluster)
 	if err != nil {
 		return errors.Wrap(err, "failed the ceph version check")
@@ -231,7 +231,7 @@ func (c *ClusterController) configureLocalCephCluster(cluster *cluster) error {
 		}
 	}
 
-	controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring the Ceph cluster")
+	controller.UpdateCondition(c.OpManagerCtx, c.context, c.namespacedName, cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, "Configuring the Ceph cluster")
 	cluster.ClusterInfo.Context = c.OpManagerCtx
 
 	// Run the orchestration
@@ -241,7 +241,7 @@ func (c *ClusterController) configureLocalCephCluster(cluster *cluster) error {
 	}
 
 	// Set the condition to the cluster object
-	controller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionReady, v1.ConditionTrue, cephv1.ClusterCreatedReason, "Cluster created successfully")
+	controller.UpdateCondition(c.OpManagerCtx, c.context, c.namespacedName, cephv1.ConditionReady, v1.ConditionTrue, cephv1.ClusterCreatedReason, "Cluster created successfully")
 	return nil
 }
diff --git a/pkg/operator/ceph/cluster/cluster_external.go b/pkg/operator/ceph/cluster/cluster_external.go
index e7f4246b01b7a..95e071fd3057d 100644
--- a/pkg/operator/ceph/cluster/cluster_external.go
+++ b/pkg/operator/ceph/cluster/cluster_external.go
@@ -44,7 +44,7 @@ func (c *ClusterController) configureExternalCephCluster(cluster *cluster) error
 		return errors.Wrap(err, "failed to validate external cluster specs")
 	}
 
-	opcontroller.UpdateCondition(c.context, c.namespacedName, cephv1.ConditionConnecting, v1.ConditionTrue, cephv1.ClusterConnectingReason, "Attempting to connect to an external Ceph cluster")
+	opcontroller.UpdateCondition(c.OpManagerCtx, c.context, c.namespacedName, cephv1.ConditionConnecting, v1.ConditionTrue, cephv1.ClusterConnectingReason, "Attempting to connect to an external Ceph cluster")
 
 	// loop until we find the secret necessary to connect to the external cluster
 	// then populate clusterInfo
diff --git a/pkg/operator/ceph/cluster/controller.go b/pkg/operator/ceph/cluster/controller.go
index 1db50677b4a09..46754e2c385c8 100644
--- a/pkg/operator/ceph/cluster/controller.go
+++ b/pkg/operator/ceph/cluster/controller.go
@@ -104,7 +104,7 @@ type ReconcileCephCluster struct {
 // Add creates a new CephCluster Controller and adds it to the Manager. The Manager will set fields on the Controller
 // and Start it when the Manager is Started.
 func Add(mgr manager.Manager, ctx *clusterd.Context, clusterController *ClusterController, opManagerContext context.Context) error {
-	return add(mgr, newReconciler(mgr, ctx, clusterController, opManagerContext), ctx)
+	return add(opManagerContext, mgr, newReconciler(mgr, ctx, clusterController, opManagerContext), ctx)
 }
 
 // newReconciler returns a new reconcile.Reconciler
@@ -123,7 +123,7 @@ func newReconciler(mgr manager.Manager, ctx *clusterd.Context, clusterController
 	}
 }
 
-func add(mgr manager.Manager, r reconcile.Reconciler, context *clusterd.Context) error {
+func add(opManagerContext context.Context, mgr manager.Manager, r reconcile.Reconciler, context *clusterd.Context) error {
 	// Create a new controller
 	c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r})
 	if err != nil {
@@ -162,7 +162,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler, context *clusterd.Context)
 
 	// Build Handler function to return the list of ceph clusters
 	// This is used by the watchers below
-	handlerFunc, err := opcontroller.ObjectToCRMapper(mgr.GetClient(), &cephv1.CephClusterList{}, mgr.GetScheme())
+	handlerFunc, err := opcontroller.ObjectToCRMapper(opManagerContext, mgr.GetClient(), &cephv1.CephClusterList{}, mgr.GetScheme())
 	if err != nil {
 		return err
 	}
@@ -244,7 +244,7 @@ func (r *ReconcileCephCluster) reconcile(request reconcile.Request) (reconcile.R
 	}
 
 	// Set a finalizer so we can do cleanup before the object goes away
-	err = opcontroller.AddFinalizerIfNotPresent(r.client, cephCluster)
+	err = opcontroller.AddFinalizerIfNotPresent(r.opManagerContext, r.client, cephCluster)
 	if err != nil {
 		return reconcile.Result{}, cephCluster, errors.Wrap(err, "failed to add finalizer")
 	}
@@ -496,12 +496,12 @@ func (r *ReconcileCephCluster) removeFinalizer(client client.Client, name types.
 	}
 
 	if finalizer == "" {
-		err = opcontroller.RemoveFinalizer(client, obj)
+		err = opcontroller.RemoveFinalizer(r.opManagerContext, client, obj)
 		if err != nil {
 			return errors.Wrap(err, "failed to remove finalizer")
 		}
 	} else {
-		err = opcontroller.RemoveFinalizerWithName(client, obj, finalizer)
+		err = opcontroller.RemoveFinalizerWithName(r.opManagerContext, client, obj, finalizer)
 		if err != nil {
 			return errors.Wrapf(err, "failed to remove finalizer %q", finalizer)
 		}
diff --git a/pkg/operator/ceph/cluster/osd/create.go b/pkg/operator/ceph/cluster/osd/create.go
index 6eb799f9982f7..cc5ecf2d062b3 100644
--- a/pkg/operator/ceph/cluster/osd/create.go
+++ b/pkg/operator/ceph/cluster/osd/create.go
@@ -386,7 +386,7 @@ func createDaemonOnPVC(c *Cluster, osd OSDInfo, pvcName string, config *provisio
 	}
 
 	message := fmt.Sprintf("Processing OSD %d on PVC %q", osd.ID, pvcName)
-	updateConditionFunc(c.context, c.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message)
+	updateConditionFunc(c.clusterInfo.Context, c.context, c.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message)
 
 	_, err = k8sutil.CreateDeployment(c.context.Clientset, d)
 	return errors.Wrapf(err, "failed to create deployment for OSD %d on PVC %q", osd.ID, pvcName)
@@ -399,7 +399,7 @@ func createDaemonOnNode(c *Cluster, osd OSDInfo, nodeName string, config *provis
 	}
 
 	message := fmt.Sprintf("Processing OSD %d on node %q", osd.ID, nodeName)
-	updateConditionFunc(c.context, c.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message)
+	updateConditionFunc(c.clusterInfo.Context, c.context, c.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message)
 
 	_, err = k8sutil.CreateDeployment(c.context.Clientset, d)
 	return errors.Wrapf(err, "failed to create deployment for OSD %d on node %q", osd.ID, nodeName)
diff --git a/pkg/operator/ceph/cluster/osd/integration_test.go b/pkg/operator/ceph/cluster/osd/integration_test.go
index 0b0d2491cafa3..3e77906f3b1f1 100644
--- a/pkg/operator/ceph/cluster/osd/integration_test.go
+++ b/pkg/operator/ceph/cluster/osd/integration_test.go
@@ -110,7 +110,7 @@ func testOSDIntegration(t *testing.T) {
 	}()
 	// stub out the conditionExportFunc to do nothing. we do not have a fake Rook interface that
 	// allows us to interact with a CephCluster resource like the fake K8s clientset.
-	updateConditionFunc = func(c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status corev1.ConditionStatus, reason cephv1.ConditionReason, message string) {
+	updateConditionFunc = func(ctx context.Context, c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status corev1.ConditionStatus, reason cephv1.ConditionReason, message string) {
 		// do nothing
 	}
diff --git a/pkg/operator/ceph/cluster/osd/osd_test.go b/pkg/operator/ceph/cluster/osd/osd_test.go
index 83acd33c6f880..e5e4a4a142ae1 100644
--- a/pkg/operator/ceph/cluster/osd/osd_test.go
+++ b/pkg/operator/ceph/cluster/osd/osd_test.go
@@ -132,7 +132,7 @@ func TestAddRemoveNode(t *testing.T) {
 	}()
 	// stub out the conditionExportFunc to do nothing. we do not have a fake Rook interface that
 	// allows us to interact with a CephCluster resource like the fake K8s clientset.
-	updateConditionFunc = func(c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status corev1.ConditionStatus, reason cephv1.ConditionReason, message string) {
+	updateConditionFunc = func(ctx context.Context, c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status corev1.ConditionStatus, reason cephv1.ConditionReason, message string) {
 		// do nothing
 	}
diff --git a/pkg/operator/ceph/cluster/osd/update.go b/pkg/operator/ceph/cluster/osd/update.go
index 00c63b8c22300..2658c50ef78fa 100644
--- a/pkg/operator/ceph/cluster/osd/update.go
+++ b/pkg/operator/ceph/cluster/osd/update.go
@@ -147,7 +147,7 @@ func (c *updateConfig) updateExistingOSDs(errs *provisionErrors) {
 			updatedDep, err = deploymentOnPVCFunc(c.cluster, osdInfo, nodeOrPVCName, c.provisionConfig)
 
 			message := fmt.Sprintf("Processing OSD %d on PVC %q", osdID, nodeOrPVCName)
-			updateConditionFunc(c.cluster.context, c.cluster.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message)
+			updateConditionFunc(c.cluster.clusterInfo.Context, c.cluster.context, c.cluster.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message)
 		} else {
 			if !c.cluster.ValidStorage.NodeExists(nodeOrPVCName) {
 				// node will not reconcile, so don't update the deployment
@@ -164,7 +164,7 @@ func (c *updateConfig) updateExistingOSDs(errs *provisionErrors) {
 			updatedDep, err = deploymentOnNodeFunc(c.cluster, osdInfo, nodeOrPVCName, c.provisionConfig)
 
 			message := fmt.Sprintf("Processing OSD %d on node %q", osdID, nodeOrPVCName)
-			updateConditionFunc(c.cluster.context, c.cluster.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message)
+			updateConditionFunc(c.cluster.clusterInfo.Context, c.cluster.context, c.cluster.clusterInfo.NamespacedName(), cephv1.ConditionProgressing, v1.ConditionTrue, cephv1.ClusterProgressingReason, message)
 		}
 		if err != nil {
 			errs.addError("%v", errors.Wrapf(err, "failed to update OSD %d", osdID))
diff --git a/pkg/operator/ceph/cluster/osd/update_test.go b/pkg/operator/ceph/cluster/osd/update_test.go
index 9f25dc63bf458..5e5ed69a497b6 100644
--- a/pkg/operator/ceph/cluster/osd/update_test.go
+++ b/pkg/operator/ceph/cluster/osd/update_test.go
@@ -121,7 +121,7 @@ func Test_updateExistingOSDs(t *testing.T) {
 
 	// stub out the conditionExportFunc to do nothing. we do not have a fake Rook interface that
 	// allows us to interact with a CephCluster resource like the fake K8s clientset.
-	updateConditionFunc = func(c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status corev1.ConditionStatus, reason cephv1.ConditionReason, message string) {
+	updateConditionFunc = func(ctx context.Context, c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status corev1.ConditionStatus, reason cephv1.ConditionReason, message string) {
 		// do nothing
 	}
 	shouldCheckOkToStopFunc = func(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo) bool {
diff --git a/pkg/operator/ceph/cluster/rbd/controller.go b/pkg/operator/ceph/cluster/rbd/controller.go
index f57974e33db83..8411cbf515085 100644
--- a/pkg/operator/ceph/cluster/rbd/controller.go
+++ b/pkg/operator/ceph/cluster/rbd/controller.go
@@ -172,7 +172,7 @@ func (r *ReconcileCephRBDMirror) reconcile(request reconcile.Request) (reconcile
 	}
 
 	// Make sure a CephCluster is present otherwise do nothing
-	cephCluster, isReadyToReconcile, _, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
+	cephCluster, isReadyToReconcile, _, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, r.context, request.NamespacedName, controllerName)
 	if !isReadyToReconcile {
 		logger.Debugf("CephCluster resource not ready in namespace %q, retrying in %q.", request.NamespacedName.Namespace, reconcileResponse.RequeueAfter.String())
 		return reconcileResponse, nil
diff --git a/pkg/operator/ceph/controller/conditions.go b/pkg/operator/ceph/controller/conditions.go
index 2cb01f201cfb8..64f00d071076a 100644
--- a/pkg/operator/ceph/controller/conditions.go
+++ b/pkg/operator/ceph/controller/conditions.go
@@ -30,10 +30,10 @@ import (
 )
 
 // UpdateCondition function will export each condition into the cluster custom resource
-func UpdateCondition(c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status v1.ConditionStatus, reason cephv1.ConditionReason, message string) {
+func UpdateCondition(ctx context.Context, c *clusterd.Context, namespaceName types.NamespacedName, conditionType cephv1.ConditionType, status v1.ConditionStatus, reason cephv1.ConditionReason, message string) {
 	// use client.Client unit test this more easily with updating statuses which must use the client
 	cluster := &cephv1.CephCluster{}
-	if err := c.Client.Get(context.TODO(), namespaceName, cluster); err != nil {
+	if err := c.Client.Get(ctx, namespaceName, cluster); err != nil {
 		logger.Errorf("failed to get cluster %v to update the conditions. %v", namespaceName, err)
 		return
 	}
diff --git a/pkg/operator/ceph/controller/controller_utils.go b/pkg/operator/ceph/controller/controller_utils.go
index 65849671dfc8e..ed399c761a78d 100644
--- a/pkg/operator/ceph/controller/controller_utils.go
+++ b/pkg/operator/ceph/controller/controller_utils.go
@@ -114,14 +114,14 @@ func canIgnoreHealthErrStatusInReconcile(cephCluster cephv1.CephCluster, control
 }
 
 // IsReadyToReconcile determines if a controller is ready to reconcile or not
-func IsReadyToReconcile(c client.Client, clustercontext *clusterd.Context, namespacedName types.NamespacedName, controllerName string) (cephv1.CephCluster, bool, bool, reconcile.Result) {
+func IsReadyToReconcile(ctx context.Context, c client.Client, clustercontext *clusterd.Context, namespacedName types.NamespacedName, controllerName string) (cephv1.CephCluster, bool, bool, reconcile.Result) {
 	cephClusterExists := false
 
 	// Running ceph commands won't work and the controller will keep re-queuing so I believe it's fine not to check
 	// Make sure a CephCluster exists before doing anything
 	var cephCluster cephv1.CephCluster
 	clusterList := &cephv1.CephClusterList{}
-	err := c.List(context.TODO(), clusterList, client.InNamespace(namespacedName.Namespace))
+	err := c.List(ctx, clusterList, client.InNamespace(namespacedName.Namespace))
 	if err != nil {
 		logger.Errorf("%q: failed to fetch CephCluster %v", controllerName, err)
 		return cephCluster, false, cephClusterExists, ImmediateRetryResult
diff --git a/pkg/operator/ceph/controller/finalizer.go b/pkg/operator/ceph/controller/finalizer.go
index 7f4d127e5a1c1..e214e88ddc518 100644
--- a/pkg/operator/ceph/controller/finalizer.go
+++ b/pkg/operator/ceph/controller/finalizer.go
@@ -52,7 +52,7 @@ func remove(list []string, s string) []string {
 
 // AddFinalizerIfNotPresent adds a finalizer an object to avoid instant deletion
 // of the object without finalizing it.
-func AddFinalizerIfNotPresent(client client.Client, obj client.Object) error {
+func AddFinalizerIfNotPresent(ctx context.Context, client client.Client, obj client.Object) error {
 	objectFinalizer := buildFinalizerName(obj.GetObjectKind().GroupVersionKind().Kind)
 
 	accessor, err := meta.Accessor(obj)
@@ -65,7 +65,7 @@ func AddFinalizerIfNotPresent(client client.Client, obj client.Object) error {
 		accessor.SetFinalizers(append(accessor.GetFinalizers(), objectFinalizer))
 
 		// Update CR with finalizer
-		if err := client.Update(context.TODO(), obj); err != nil {
+		if err := client.Update(ctx, obj); err != nil {
 			return errors.Wrapf(err, "failed to add finalizer %q on %q", objectFinalizer, accessor.GetName())
 		}
 	}
@@ -74,14 +74,14 @@ func AddFinalizerIfNotPresent(client client.Client, obj client.Object) error {
 }
 
 // RemoveFinalizer removes a finalizer from an object
-func RemoveFinalizer(client client.Client, obj client.Object) error {
+func RemoveFinalizer(ctx context.Context, client client.Client, obj client.Object) error {
 	finalizerName := buildFinalizerName(obj.GetObjectKind().GroupVersionKind().Kind)
-	return RemoveFinalizerWithName(client, obj, finalizerName)
+	return RemoveFinalizerWithName(ctx, client, obj, finalizerName)
 }
 
 // RemoveFinalizerWithName removes finalizer passed as an argument from an object
-func RemoveFinalizerWithName(client client.Client, obj client.Object, finalizerName string) error {
-	err := client.Get(context.TODO(), types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, obj)
+func RemoveFinalizerWithName(ctx context.Context, client client.Client, obj client.Object, finalizerName string) error {
+	err := client.Get(ctx, types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, obj)
 	if err != nil {
 		return errors.Wrap(err, "failed to get the latest version of the object")
 	}
@@ -93,7 +93,7 @@ func RemoveFinalizerWithName(client client.Client, obj client.Object, finalizerN
 	if contains(accessor.GetFinalizers(), finalizerName) {
 		logger.Infof("removing finalizer %q on %q", finalizerName, accessor.GetName())
 		accessor.SetFinalizers(remove(accessor.GetFinalizers(), finalizerName))
-		if err := client.Update(context.TODO(), obj); err != nil {
+		if err := client.Update(ctx, obj); err != nil {
 			return errors.Wrapf(err, "failed to remove finalizer %q on %q", finalizerName, accessor.GetName())
 		}
 	}
diff --git a/pkg/operator/ceph/controller/finalizer_test.go b/pkg/operator/ceph/controller/finalizer_test.go
index a325fe37eb9a6..8c06ebbe158db 100644
--- a/pkg/operator/ceph/controller/finalizer_test.go
+++ b/pkg/operator/ceph/controller/finalizer_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package controller
 
 import (
+	"context"
 	"testing"
 
 	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
@@ -45,7 +46,7 @@ func TestAddFinalizerIfNotPresent(t *testing.T) {
 	cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
 
 	assert.Empty(t, fakeObject.Finalizers)
-	err := AddFinalizerIfNotPresent(cl, fakeObject)
+	err := AddFinalizerIfNotPresent(context.TODO(), cl, fakeObject)
 	assert.NoError(t, err)
 	assert.NotEmpty(t, fakeObject.Finalizers)
 }
@@ -72,7 +73,7 @@ func TestRemoveFinalizer(t *testing.T) {
 	cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
 
 	assert.NotEmpty(t, fakeObject.Finalizers)
-	err := RemoveFinalizer(cl, fakeObject)
+	err := RemoveFinalizer(context.TODO(), cl, fakeObject)
 	assert.NoError(t, err)
 	assert.Empty(t, fakeObject.Finalizers)
 }
@@ -99,7 +100,7 @@ func TestRemoveFinalizerWithName(t *testing.T) {
 	cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
 
 	assert.NotEmpty(t, fakeObject.Finalizers)
-	err := RemoveFinalizerWithName(cl, fakeObject, "cephblockpool.ceph.rook.io")
+	err := RemoveFinalizerWithName(context.TODO(), cl, fakeObject, "cephblockpool.ceph.rook.io")
 	assert.NoError(t, err)
 	assert.Empty(t, fakeObject.Finalizers)
 }
diff --git a/pkg/operator/ceph/controller/handler.go b/pkg/operator/ceph/controller/handler.go
index 813a2abb22f3c..a457f7ac5ee35 100644
--- a/pkg/operator/ceph/controller/handler.go
+++ b/pkg/operator/ceph/controller/handler.go
@@ -33,7 +33,7 @@ import (
 // It is used to trigger a reconcile object Kind A when watching object Kind B
 // So we reconcile Kind A instead of Kind B
 // For instance, we watch for CephCluster CR changes but want to reconcile CephFilesystem based on a Spec change
-func ObjectToCRMapper(c client.Client, ro runtime.Object, scheme *runtime.Scheme) (handler.MapFunc, error) {
+func ObjectToCRMapper(ctx context.Context, c client.Client, ro runtime.Object, scheme *runtime.Scheme) (handler.MapFunc, error) {
 	if _, ok := ro.(metav1.ListInterface); !ok {
 		return nil, errors.Errorf("expected a metav1.ListInterface, got %T instead", ro)
 	}
@@ -47,7 +47,7 @@ func ObjectToCRMapper(c client.Client, ro runtime.Object, scheme *runtime.Scheme
 	return handler.MapFunc(func(o client.Object) []ctrl.Request {
 		list := &unstructured.UnstructuredList{}
 		list.SetGroupVersionKind(gvk)
-		err := c.List(context.TODO(), list)
+		err := c.List(ctx, list)
 		if err != nil {
 			return nil
 		}
diff --git a/pkg/operator/ceph/controller/handler_test.go b/pkg/operator/ceph/controller/handler_test.go
index e5b3b127278ce..0fb0557c616de 100644
--- a/pkg/operator/ceph/controller/handler_test.go
+++ b/pkg/operator/ceph/controller/handler_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package controller
 
 import (
+	"context"
 	"reflect"
 	"testing"
 
@@ -61,7 +62,7 @@ func TestObjectToCRMapper(t *testing.T) {
 		{NamespacedName: client.ObjectKey{Name: "my-pool", Namespace: "rook-ceph"}},
 	}
 
-	handlerFunc, err := ObjectToCRMapper(cl, objects[0], s)
+	handlerFunc, err := ObjectToCRMapper(context.TODO(), cl, objects[0], s)
 	assert.NoError(t, err)
 	assert.ElementsMatch(t, fakeRequest, handlerFunc(fs))
 }
diff --git a/pkg/operator/ceph/controller/object_operations.go b/pkg/operator/ceph/controller/object_operations.go
index 668841657dc53..2b55430274876 100644
--- a/pkg/operator/ceph/controller/object_operations.go
+++ b/pkg/operator/ceph/controller/object_operations.go
@@ -28,7 +28,7 @@ import (
 )
 
 // CreateOrUpdateObject updates an object with a given status
-func CreateOrUpdateObject(client client.Client, obj client.Object) error {
+func CreateOrUpdateObject(ctx context.Context, client client.Client, obj client.Object) error {
 	accessor, err := meta.Accessor(obj)
 	if err != nil {
 		return errors.Wrap(err, "failed to get meta information of object")
@@ -38,10 +38,10 @@ func CreateOrUpdateObject(client client.Client, obj client.Object) error {
 	// Somehow meta.TypeAccessor returns an empty string for the type name so using reflection instead
 	objType := reflect.TypeOf(obj)
 
-	err = client.Create(context.TODO(), obj)
+	err = client.Create(ctx, obj)
 	if err != nil {
 		if kerrors.IsAlreadyExists(err) {
-			err = client.Update(context.TODO(), obj)
+			err = client.Update(ctx, obj)
 			if err != nil {
 				return errors.Wrapf(err, "failed to update ceph %q object %q", objType, objName)
 			}
diff --git a/pkg/operator/ceph/file/controller.go b/pkg/operator/ceph/file/controller.go
index 98629a65d1889..ec77e79b3a000 100644
--- a/pkg/operator/ceph/file/controller.go
+++ b/pkg/operator/ceph/file/controller.go
@@ -90,7 +90,7 @@ type fsHealth struct {
 // Add creates a new CephFilesystem Controller and adds it to the Manager. The Manager will set fields on the Controller
 // and Start it when the Manager is Started.
 func Add(mgr manager.Manager, context *clusterd.Context, opManagerContext context.Context, opConfig opcontroller.OperatorConfig) error {
-	return add(mgr, newReconciler(mgr, context, opManagerContext, opConfig))
+	return add(opManagerContext, mgr, newReconciler(mgr, context, opManagerContext, opConfig))
 }
 
 // newReconciler returns a new reconcile.Reconciler
@@ -105,7 +105,7 @@ func newReconciler(mgr manager.Manager, context *clusterd.Context, opManagerCont
 	}
 }
 
-func add(mgr manager.Manager, r reconcile.Reconciler) error {
+func add(opManagerContext context.Context, mgr manager.Manager, r reconcile.Reconciler) error {
 	// Create a new controller
 	c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r})
 	if err != nil {
@@ -132,7 +132,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 
 	// Build Handler function to return the list of ceph filesystems
 	// This is used by the watchers below
-	handlerFunc, err := opcontroller.ObjectToCRMapper(mgr.GetClient(), &cephv1.CephFilesystemList{}, mgr.GetScheme())
+	handlerFunc, err := opcontroller.ObjectToCRMapper(opManagerContext, mgr.GetClient(), &cephv1.CephFilesystemList{}, mgr.GetScheme())
 	if err != nil {
 		return err
 	}
@@ -174,7 +174,7 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil
 	}
 
 	// Set a finalizer so we can do cleanup before the object goes away
-	err = opcontroller.AddFinalizerIfNotPresent(r.client, cephFilesystem)
+	err = opcontroller.AddFinalizerIfNotPresent(r.opManagerContext, r.client, cephFilesystem)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrap(err, "failed to add finalizer")
 	}
@@ -185,7 +185,7 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil
 	}
 
 	// Make sure a CephCluster is present otherwise do nothing
-	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
+	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, r.context, request.NamespacedName, controllerName)
 	if !isReadyToReconcile {
 		// This handles the case where the Ceph Cluster is gone and we want to delete that CR
 		// We skip the deleteFilesystem() function since everything is gone already
@@ -195,7 +195,7 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil
 	// This handles the case where the operator is not ready to accept Ceph command but the cluster exists
 	if !cephFilesystem.GetDeletionTimestamp().IsZero() && !cephClusterExists {
 		// Remove finalizer
-		err := opcontroller.RemoveFinalizer(r.client, cephFilesystem)
+		err := opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephFilesystem)
 		if err != nil {
 			return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
 		}
@@ -250,7 +250,7 @@ func (r *ReconcileCephFilesystem) reconcile(request reconcile.Request) (reconcil
 	}
 
 	// Remove finalizer
-	err = opcontroller.RemoveFinalizer(r.client, cephFilesystem)
+	err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephFilesystem)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
 	}
diff --git a/pkg/operator/ceph/file/mirror/controller.go b/pkg/operator/ceph/file/mirror/controller.go
index ca5aa9e1b79d8..a12d2f7fb4ffe 100644
--- a/pkg/operator/ceph/file/mirror/controller.go
+++ b/pkg/operator/ceph/file/mirror/controller.go
@@ -158,7 +158,7 @@ func (r *ReconcileFilesystemMirror) reconcile(request reconcile.Request) (reconc
 	}
 
 	// Make sure a CephCluster is present otherwise do nothing
-	cephCluster, isReadyToReconcile, _, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
+	cephCluster, isReadyToReconcile, _, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, r.context, request.NamespacedName, controllerName)
 	if !isReadyToReconcile {
 		logger.Debugf("CephCluster resource not ready in namespace %q, retrying in %q.", request.NamespacedName.Namespace, reconcileResponse.RequeueAfter.String())
 		return reconcileResponse, nil
diff --git a/pkg/operator/ceph/nfs/controller.go b/pkg/operator/ceph/nfs/controller.go
index 348cacd13733a..dd949c9053ee9 100644
--- a/pkg/operator/ceph/nfs/controller.go
+++ b/pkg/operator/ceph/nfs/controller.go
@@ -157,7 +157,7 @@ func (r *ReconcileCephNFS) reconcile(request reconcile.Request) (reconcile.Resul
 	}
 
 	// Set a finalizer so we can do cleanup before the object goes away
-	err = opcontroller.AddFinalizerIfNotPresent(r.client, cephNFS)
+	err = opcontroller.AddFinalizerIfNotPresent(r.opManagerContext, r.client, cephNFS)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrap(err, "failed to add finalizer")
 	}
@@ -168,7 +168,7 @@ func (r *ReconcileCephNFS) reconcile(request reconcile.Request) (reconcile.Resul
 	}
 
 	// Make sure a CephCluster is present otherwise do nothing
-	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
+	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, r.context, request.NamespacedName, controllerName)
 	if !isReadyToReconcile {
 		// This handles the case where the Ceph Cluster is gone and we want to delete that CR
 		// We skip the deleteStore() function since everything is gone already
@@ -178,7 +178,7 @@ func (r *ReconcileCephNFS) reconcile(request reconcile.Request) (reconcile.Resul
 	// This handles the case where the operator is not ready to accept Ceph command but the cluster exists
 	if !cephNFS.GetDeletionTimestamp().IsZero() && !cephClusterExists {
 		// Remove finalizer
-		err := opcontroller.RemoveFinalizer(r.client, cephNFS)
+		err := opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephNFS)
 		if err != nil {
 			return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
 		}
@@ -214,7 +214,7 @@ func (r *ReconcileCephNFS) reconcile(request reconcile.Request) (reconcile.Resul
 	}
 
 	// Remove finalizer
-	err = opcontroller.RemoveFinalizer(r.client, cephNFS)
+	err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephNFS)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
 	}
diff --git a/pkg/operator/ceph/object/controller.go b/pkg/operator/ceph/object/controller.go
index bc7558e9612c4..8d29a29e46c11 100644
--- a/pkg/operator/ceph/object/controller.go
+++ b/pkg/operator/ceph/object/controller.go
@@ -173,7 +173,7 @@ func (r *ReconcileCephObjectStore) reconcile(request reconcile.Request) (reconci
 	}
 
 	// Set a finalizer so we can do cleanup before the object goes away
-	err = opcontroller.AddFinalizerIfNotPresent(r.client, cephObjectStore)
+	err = opcontroller.AddFinalizerIfNotPresent(r.opManagerContext, r.client, cephObjectStore)
 	if err != nil {
 		return reconcile.Result{}, cephObjectStore, errors.Wrap(err, "failed to add finalizer")
 	}
@@ -185,7 +185,7 @@ func (r *ReconcileCephObjectStore) reconcile(request reconcile.Request) (reconci
 	}
 
 	// Make sure a CephCluster is present otherwise do nothing
-	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
+	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, r.context, request.NamespacedName, controllerName)
 	if !isReadyToReconcile {
 		// This handles the case where the Ceph Cluster is gone and we want to delete that CR
 		// We skip the deleteStore() function since everything is gone already
@@ -195,7 +195,7 @@ func (r *ReconcileCephObjectStore) reconcile(request reconcile.Request) (reconci
 	// This handles the case where the operator is not ready to accept Ceph command but the cluster exists
 	if !cephObjectStore.GetDeletionTimestamp().IsZero() && !cephClusterExists {
 		// Remove finalizer
-		err := opcontroller.RemoveFinalizer(r.client, cephObjectStore)
+		err := opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephObjectStore)
 		if err != nil {
 			return reconcile.Result{}, cephObjectStore, errors.Wrap(err, "failed to remove finalizer")
 		}
@@ -283,7 +283,7 @@ func (r *ReconcileCephObjectStore) reconcile(request reconcile.Request) (reconci
 	}
 
 	// Remove finalizer
-	err = opcontroller.RemoveFinalizer(r.client, cephObjectStore)
+	err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephObjectStore)
 	if err != nil {
 		return reconcile.Result{}, cephObjectStore, errors.Wrap(err, "failed to remove finalizer")
 	}
diff --git a/pkg/operator/ceph/object/realm/controller.go b/pkg/operator/ceph/object/realm/controller.go
index 43b36637d288d..2e900b3be93a6 100644
--- a/pkg/operator/ceph/object/realm/controller.go
+++ b/pkg/operator/ceph/object/realm/controller.go
@@ -145,7 +145,7 @@ func (r *ReconcileObjectRealm) reconcile(request reconcile.Request) (reconcile.R
 	}
 
 	// Make sure a CephCluster is present otherwise do nothing
-	_, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
+	_, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, r.context, request.NamespacedName, controllerName)
 	if !isReadyToReconcile {
 		// This handles the case where the Ceph Cluster is gone and we want to delete that CR
 		if !cephObjectRealm.GetDeletionTimestamp().IsZero() && !cephClusterExists {
diff --git a/pkg/operator/ceph/object/user/controller.go b/pkg/operator/ceph/object/user/controller.go
index 1e1c983bfd4f9..8203cfa4f8b36 100644
--- a/pkg/operator/ceph/object/user/controller.go
+++ b/pkg/operator/ceph/object/user/controller.go
@@ -148,7 +148,7 @@ func (r *ReconcileObjectStoreUser) reconcile(request reconcile.Request) (reconci
 	}
 
 	// Set a finalizer so we can do cleanup before the object goes away
-	err = opcontroller.AddFinalizerIfNotPresent(r.client, cephObjectStoreUser)
+	err = opcontroller.AddFinalizerIfNotPresent(r.opManagerContext, r.client, cephObjectStoreUser)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrap(err, "failed to add finalizer")
 	}
@@ -159,7 +159,7 @@ func (r *ReconcileObjectStoreUser) reconcile(request reconcile.Request) (reconci
 	}
 
 	// Make sure a CephCluster is present otherwise do nothing
-	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
+	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, r.context, request.NamespacedName, controllerName)
 	if !isReadyToReconcile {
 		// This handles the case where the Ceph Cluster is gone and we want to delete that CR
 		// We skip the deleteUser() function since everything is gone already
@@ -169,7 +169,7 @@ func (r *ReconcileObjectStoreUser) reconcile(request reconcile.Request) (reconci
 	// This handles the case where the operator is not ready to accept Ceph command but the cluster exists
 	if !cephObjectStoreUser.GetDeletionTimestamp().IsZero() && !cephClusterExists {
 		// Remove finalizer
-		err = opcontroller.RemoveFinalizer(r.client, cephObjectStoreUser)
+		err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephObjectStoreUser)
 		if err != nil {
 			return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
 		}
@@ -192,7 +192,7 @@ func (r *ReconcileObjectStoreUser) reconcile(request reconcile.Request) (reconci
 	if err != nil {
 		if !cephObjectStoreUser.GetDeletionTimestamp().IsZero() {
 			// Remove finalizer
-			err = opcontroller.RemoveFinalizer(r.client, cephObjectStoreUser)
+			err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephObjectStoreUser)
 			if err != nil {
 				return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
 			}
@@ -219,7 +219,7 @@ func (r *ReconcileObjectStoreUser) reconcile(request reconcile.Request) (reconci
 	}
 
 	// Remove finalizer
-	err = opcontroller.RemoveFinalizer(r.client, cephObjectStoreUser)
+	err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephObjectStoreUser)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
 	}
@@ -439,7 +439,7 @@ func (r *ReconcileObjectStoreUser) reconcileCephUserSecret(cephObjectStoreUser *
 	}
 
 	// Create Kubernetes Secret
-	err = opcontroller.CreateOrUpdateObject(r.client, secret)
+	err = opcontroller.CreateOrUpdateObject(r.opManagerContext, r.client, secret)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrapf(err, "failed to create or update ceph object user %q secret", secret.Name)
 	}
diff --git a/pkg/operator/ceph/object/zone/controller.go b/pkg/operator/ceph/object/zone/controller.go
index 4baed0d297278..f7948ee05ab3c 100644
--- a/pkg/operator/ceph/object/zone/controller.go
+++ b/pkg/operator/ceph/object/zone/controller.go
@@ -141,7 +141,7 @@ func (r *ReconcileObjectZone) reconcile(request reconcile.Request) (reconcile.Re
 	}
 
 	// Make sure a CephCluster is present otherwise do nothing
-	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
+	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, r.context, request.NamespacedName, controllerName)
 	if !isReadyToReconcile {
 		// This handles the case where the Ceph Cluster is gone and we want to delete that CR
 		//
diff --git a/pkg/operator/ceph/object/zonegroup/controller.go b/pkg/operator/ceph/object/zonegroup/controller.go
index 5d5a42c640b54..310ddafe4ae0b 100644
--- a/pkg/operator/ceph/object/zonegroup/controller.go
+++ b/pkg/operator/ceph/object/zonegroup/controller.go
@@ -139,7 +139,7 @@ func (r *ReconcileObjectZoneGroup) reconcile(request reconcile.Request) (reconci
 	}
 
 	// Make sure a CephCluster is present otherwise do nothing
-	_, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
+	_, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, r.context, request.NamespacedName, controllerName)
 	if !isReadyToReconcile {
 		// This handles the case where the Ceph Cluster is gone and we want to delete that CR
 		if !cephObjectZoneGroup.GetDeletionTimestamp().IsZero() && !cephClusterExists {
diff --git a/pkg/operator/ceph/pool/controller.go b/pkg/operator/ceph/pool/controller.go
index 2d62feef3de78..6ff135173d467 100644
--- a/pkg/operator/ceph/pool/controller.go
+++ b/pkg/operator/ceph/pool/controller.go
@@ -83,7 +83,7 @@ type blockPoolHealth struct {
 // Add creates a new CephBlockPool Controller and adds it to the Manager. The Manager will set fields on the Controller
 // and Start it when the Manager is Started.
 func Add(mgr manager.Manager, context *clusterd.Context, opManagerContext context.Context, opConfig opcontroller.OperatorConfig) error {
-	return add(mgr, newReconciler(mgr, context, opManagerContext))
+	return add(opManagerContext, mgr, newReconciler(mgr, context, opManagerContext))
 }
 
 // newReconciler returns a new reconcile.Reconciler
@@ -97,7 +97,7 @@ func newReconciler(mgr manager.Manager, context *clusterd.Context, opManagerCont
 	}
 }
 
-func add(mgr manager.Manager, r reconcile.Reconciler) error {
+func add(opManagerContext context.Context, mgr manager.Manager, r reconcile.Reconciler) error {
 	// Create a new controller
 	c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r})
 	if err != nil {
@@ -113,7 +113,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 
 	// Build Handler function to return the list of ceph block pool
 	// This is used by the watchers below
-	handlerFunc, err := opcontroller.ObjectToCRMapper(mgr.GetClient(), &cephv1.CephBlockPoolList{}, mgr.GetScheme())
+	handlerFunc, err := opcontroller.ObjectToCRMapper(opManagerContext, mgr.GetClient(), &cephv1.CephBlockPoolList{}, mgr.GetScheme())
 	if err != nil {
 		return err
 	}
@@ -155,7 +155,7 @@ func (r *ReconcileCephBlockPool) reconcile(request reconcile.Request) (reconcile
 	}
 
 	// Set a finalizer so we can do cleanup before the object goes away
-	err = opcontroller.AddFinalizerIfNotPresent(r.client, cephBlockPool)
+	err = opcontroller.AddFinalizerIfNotPresent(r.opManagerContext, r.client, cephBlockPool)
 	if err != nil {
 		return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to add finalizer")
 	}
@@ -166,7 +166,7 @@ func (r *ReconcileCephBlockPool) reconcile(request reconcile.Request) (reconcile
 	}
 
 	// Make sure a CephCluster is present otherwise do nothing
-	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.client, r.context, request.NamespacedName, controllerName)
+	cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, r.context, request.NamespacedName, controllerName)
 	if !isReadyToReconcile {
 		// This handles the case where the Ceph Cluster is gone and we want to delete that CR
 		// We skip the deletePool() function since everything is gone already
@@ -176,7 +176,7 @@ func (r *ReconcileCephBlockPool) reconcile(request reconcile.Request) (reconcile
 	// This handles the case where the operator is not ready to accept Ceph command but the cluster exists
 	if !cephBlockPool.GetDeletionTimestamp().IsZero() && !cephClusterExists {
 		// Remove finalizer
-		err = opcontroller.RemoveFinalizer(r.client, cephBlockPool)
+		err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephBlockPool)
 		if err != nil {
 			return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to remove finalizer")
 		}
@@ -227,7 +227,7 @@ func (r *ReconcileCephBlockPool) reconcile(request reconcile.Request) (reconcile
 	}
 
 	// Remove finalizer
-	err = opcontroller.RemoveFinalizer(r.client, cephBlockPool)
+	err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephBlockPool)
 	if err != nil {
 		return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to remove finalizer")
 	}
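
For reviewers, here is a minimal sketch of the calling pattern this patch establishes. It is illustrative only: exampleReconciler and ensureFinalizer are hypothetical names, but the helper signature and the r.opManagerContext field mirror the patched controllers. The point is that the helper now performs its API call with the caller's context, so cancelling the operator manager's context also aborts in-flight client calls that previously ran under context.TODO().

package example // illustrative package, not part of the patch

import (
	"context"

	"github.com/pkg/errors"
	opcontroller "github.com/rook/rook/pkg/operator/ceph/controller"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// exampleReconciler stands in for any of the reconcilers touched by this patch.
type exampleReconciler struct {
	client           client.Client
	opManagerContext context.Context // handed down from the operator manager
}

// ensureFinalizer shows the post-patch call shape: the context is passed
// explicitly, so the client.Update inside the helper is cancellable.
func (r *exampleReconciler) ensureFinalizer(obj client.Object) error {
	if err := opcontroller.AddFinalizerIfNotPresent(r.opManagerContext, r.client, obj); err != nil {
		return errors.Wrap(err, "failed to add finalizer")
	}
	return nil
}

Cancelling r.opManagerContext (for example on operator shutdown) now propagates into the helper's client.Update call, which was not possible while the helper created its own context.TODO() internally.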