From 8c730e6f3cd7adcfbf2c882794605b4bda372b0f Mon Sep 17 00:00:00 2001 From: Rakshith R Date: Mon, 9 May 2022 16:44:07 +0530 Subject: [PATCH] core: treat cluster as not existing if it is allowed to uninstall with volumes IsReadyToReconcile() used to return cluster as not existing if cleanup policy was set to destroy and cluster marked for deletion. However, to allow proper deletion of corresponding PVCs, deletion of blockpool and filesystem CRs needs to wait for rbd images and subvolumes to be deleted. Hence, consider cluster as not existing only when `AllowUninstallWithVolumes` is also set to true. Signed-off-by: Rakshith R --- .../ceph/controller/controller_utils.go | 6 ++++-- .../ceph/controller/controller_utils_test.go | 21 +++++++++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/pkg/operator/ceph/controller/controller_utils.go b/pkg/operator/ceph/controller/controller_utils.go index dcd096d3c17b..6a17d53d80b2 100644 --- a/pkg/operator/ceph/controller/controller_utils.go +++ b/pkg/operator/ceph/controller/controller_utils.go @@ -137,8 +137,10 @@ func IsReadyToReconcile(ctx context.Context, c client.Client, namespacedName typ } cephCluster = clusterList.Items[0] - // If the cluster has a cleanup policy to destroy the cluster and it has been marked for deletion, treat it as if it does not exist - if cephCluster.Spec.CleanupPolicy.HasDataDirCleanPolicy() && !cephCluster.DeletionTimestamp.IsZero() { + // If the cluster has a cleanup policy to destroy the cluster, it has been marked for deletion and it is allowed to uninstall + // with volumes, treat it as if it does not exist + if cephCluster.Spec.CleanupPolicy.HasDataDirCleanPolicy() && !cephCluster.DeletionTimestamp.IsZero() && + cephCluster.Spec.CleanupPolicy.AllowUninstallWithVolumes { logger.Infof("%q: CephCluster has a destructive cleanup policy, allowing %q to be deleted", controllerName, namespacedName) return cephCluster, false, cephClusterExists, WaitForRequeueIfCephClusterNotReady } 
diff --git a/pkg/operator/ceph/controller/controller_utils_test.go b/pkg/operator/ceph/controller/controller_utils_test.go index d8872551084c..b38bd11785ba 100644 --- a/pkg/operator/ceph/controller/controller_utils_test.go +++ b/pkg/operator/ceph/controller/controller_utils_test.go @@ -173,6 +173,27 @@ func TestIsReadyToReconcile(t *testing.T) { c, ready, clusterExists, _ := IsReadyToReconcile(ctx.TODO(), client, clusterName, controllerName) assert.NotNil(t, c) assert.False(t, ready) + assert.True(t, clusterExists) + }) + + t.Run("cephcluster with cleanup policy when deleted and allowed to uninstall with volumes", func(t *testing.T) { + cephCluster := &cephv1.CephCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName.Name, + Namespace: clusterName.Namespace, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + Spec: cephv1.ClusterSpec{ + CleanupPolicy: cephv1.CleanupPolicySpec{ + Confirmation: cephv1.DeleteDataDirOnHostsConfirmation, + AllowUninstallWithVolumes: true, + }, + }} + objects := []runtime.Object{cephCluster} + client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objects...).Build() + c, ready, clusterExists, _ := IsReadyToReconcile(ctx.TODO(), client, clusterName, controllerName) + assert.NotNil(t, c) + assert.False(t, ready) assert.False(t, clusterExists) }) }