From 66478955a61d0ee5f73a29741439f350e7c4b96e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Wed, 13 Oct 2021 12:26:38 +0200 Subject: [PATCH] ceph: remove default value for pool compression MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Defaulting CompressionMode to none effectively overrides any value set in Parameters. The field is deprecated but still takes precedence, which means that in its previous form Parameters was always ignored, since CompressionMode was always set to none when left empty. Signed-off-by: Sébastien Han (cherry picked from commit 28cc6f5514c83f907699614c20c2cd60acf619e1) --- .../charts/rook-ceph/templates/resources.yaml | 21 +- cluster/examples/kubernetes/ceph/crds.yaml | 21 +- pkg/apis/ceph.rook.io/v1/types.go | 3 +- pkg/daemon/ceph/client/pool.go | 4 +- pkg/operator/ceph/pool/validate.go | 18 +- pkg/operator/ceph/pool/validate_test.go | 238 ++++++++++-------- tests/framework/installer/ceph_manifests.go | 3 +- .../installer/ceph_manifests_v1.6.go | 3 +- 8 files changed, 165 insertions(+), 146 deletions(-) diff --git a/cluster/charts/rook-ceph/templates/resources.yaml b/cluster/charts/rook-ceph/templates/resources.yaml index fadb9197c578..5ce08e25f303 100644 --- a/cluster/charts/rook-ceph/templates/resources.yaml +++ b/cluster/charts/rook-ceph/templates/resources.yaml @@ -34,8 +34,7 @@ spec: description: PoolSpec represents the spec of ceph pool properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -4458,8 +4457,7 @@ spec: description: PoolSpec represents the spec of ceph pool properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -4626,8 +4624,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -6387,8 +6384,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7326,8 +7322,7 @@ spec: nullable:
true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7822,8 +7817,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7988,8 +7982,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive diff --git a/cluster/examples/kubernetes/ceph/crds.yaml b/cluster/examples/kubernetes/ceph/crds.yaml index c95853d4f783..35829a3ea4eb 100644 --- a/cluster/examples/kubernetes/ceph/crds.yaml +++ b/cluster/examples/kubernetes/ceph/crds.yaml @@ -36,8 +36,7 @@ spec: description: PoolSpec represents the spec of ceph pool properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -4456,8 +4455,7 @@ spec: description: PoolSpec represents the spec of ceph pool properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -4624,8 +4622,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -6382,8 +6379,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, 
aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7321,8 +7317,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7814,8 +7809,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive @@ -7980,8 +7974,7 @@ spec: nullable: true properties: compressionMode: - default: none - description: 'The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force)' + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' enum: - none - passive diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go index 6c2fb5855cdc..4c4604f3c73d 100755 --- a/pkg/apis/ceph.rook.io/v1/types.go +++ b/pkg/apis/ceph.rook.io/v1/types.go @@ -602,9 +602,10 @@ type PoolSpec struct { // +nullable DeviceClass string `json:"deviceClass,omitempty"` + // DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" // The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) // +kubebuilder:validation:Enum=none;passive;aggressive;force;"" - // +kubebuilder:default=none + // Do NOT set a default value for kubebuilder as this will override the Parameters // +optional // +nullable CompressionMode string `json:"compressionMode,omitempty"` diff --git a/pkg/daemon/ceph/client/pool.go b/pkg/daemon/ceph/client/pool.go index f35a8720f658..5813c3df5c92 100644 --- a/pkg/daemon/ceph/client/pool.go +++ b/pkg/daemon/ceph/client/pool.go @@ -34,7 +34,7 @@ const ( confirmFlag = "--yes-i-really-mean-it" reallyConfirmFlag = "--yes-i-really-really-mean-it" targetSizeRatioProperty = "target_size_ratio" - compressionModeProperty = "compression_mode" + CompressionModeProperty = "compression_mode" PgAutoscaleModeProperty = "pg_autoscale_mode" PgAutoscaleModeOn = "on" ) @@ -252,7 +252,7 @@ func setCommonPoolProperties(context *clusterd.Context, clusterInfo *ClusterInfo } if pool.IsCompressionEnabled() { - pool.Parameters[compressionModeProperty] = pool.CompressionMode + pool.Parameters[CompressionModeProperty] = pool.CompressionMode } // Apply properties diff --git a/pkg/operator/ceph/pool/validate.go b/pkg/operator/ceph/pool/validate.go index 
f96864627602..44d96c113b65 100644 --- a/pkg/operator/ceph/pool/validate.go +++ b/pkg/operator/ceph/pool/validate.go @@ -139,11 +139,19 @@ func ValidatePoolSpec(context *clusterd.Context, clusterInfo *cephclient.Cluster // validate pool compression mode if specified if p.CompressionMode != "" { - switch p.CompressionMode { - case "none", "passive", "aggressive", "force": - break - default: - return errors.Errorf("unrecognized compression mode %q", p.CompressionMode) + logger.Warning("compressionMode is DEPRECATED, use Parameters instead") + } + + // Test the same for Parameters + if p.Parameters != nil { + compression, ok := p.Parameters[client.CompressionModeProperty] + if ok && compression != "" { + switch compression { + case "none", "passive", "aggressive", "force": + break + default: + return errors.Errorf("failed to validate pool spec unknown compression mode %q", compression) + } } } diff --git a/pkg/operator/ceph/pool/validate_test.go b/pkg/operator/ceph/pool/validate_test.go index 8bd1faf5b1cd..6674c201cc51 100644 --- a/pkg/operator/ceph/pool/validate_test.go +++ b/pkg/operator/ceph/pool/validate_test.go @@ -34,148 +34,170 @@ func TestValidatePool(t *testing.T) { clusterInfo := &cephclient.ClusterInfo{Namespace: "myns"} clusterSpec := &cephv1.ClusterSpec{} - // not specifying some replication or EC settings is fine - p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - err := ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + t.Run("not specifying some replication or EC settings is fine", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.NoError(t, err) + }) - // must specify name - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Namespace: clusterInfo.Namespace}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.NotNil(t, err) + t.Run("must specify name", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Namespace: clusterInfo.Namespace}} + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // must specify namespace - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool"}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.NotNil(t, err) + t.Run("must specify namespace", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool"}} + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // must not specify both replication and EC settings - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.ErasureCoded.CodingChunks = 2 - p.Spec.ErasureCoded.DataChunks = 3 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.NotNil(t, err) + t.Run("must not specify both replication and EC settings", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + p.Spec.Replicated.RequireSafeReplicaSize = false + p.Spec.ErasureCoded.CodingChunks = 2 + p.Spec.ErasureCoded.DataChunks = 3 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // succeed with replication settings - p = 
cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + t.Run("succeed with replication settings", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + p.Spec.Replicated.RequireSafeReplicaSize = false + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.NoError(t, err) + }) - // size is 1 and RequireSafeReplicaSize is true - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = true - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // succeed with ec settings - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.ErasureCoded.CodingChunks = 1 - p.Spec.ErasureCoded.DataChunks = 2 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + t.Run("size is 1 and RequireSafeReplicaSize is true", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + p.Spec.Replicated.RequireSafeReplicaSize = true + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // Tests with various compression modes - // succeed with compression mode "none" - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.CompressionMode = "none" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + t.Run("succeed with ec settings", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.ErasureCoded.CodingChunks = 1 + p.Spec.ErasureCoded.DataChunks = 2 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.NoError(t, err) + }) - // succeed with compression mode "aggressive" - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.CompressionMode = "aggressive" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + t.Run("fail Parameters['compression_mode'] is unknown", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + p.Spec.Replicated.RequireSafeReplicaSize = false + p.Spec.Parameters = map[string]string{"compression_mode": "foo"} + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + assert.EqualError(t, err, "failed to validate pool spec unknown compression mode \"foo\"") + assert.Equal(t, "foo", p.Spec.Parameters["compression_mode"]) + }) - // fail with compression mode "unsupported" - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.Replicated.Size = 1 - p.Spec.Replicated.RequireSafeReplicaSize = false - p.Spec.CompressionMode = "unsupported" - err = ValidatePool(context, 
clusterInfo, clusterSpec, &p) - assert.Error(t, err) + t.Run("success Parameters['compression_mode'] is known", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + p.Spec.Replicated.RequireSafeReplicaSize = false + p.Spec.Parameters = map[string]string{"compression_mode": "aggressive"} + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.NoError(t, err) + }) - // fail since replica size is lower than ReplicasPerFailureDomain - p.Spec.Replicated.ReplicasPerFailureDomain = 2 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) + t.Run("fail since replica size is lower than ReplicasPerFailureDomain", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 1 + p.Spec.Replicated.ReplicasPerFailureDomain = 2 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // fail since replica size is equal than ReplicasPerFailureDomain - p.Spec.Replicated.Size = 2 - p.Spec.Replicated.ReplicasPerFailureDomain = 2 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) + t.Run("fail since replica size is equal than ReplicasPerFailureDomain", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 2 + p.Spec.Replicated.ReplicasPerFailureDomain = 2 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // fail since ReplicasPerFailureDomain is not a power of 2 - p.Spec.Replicated.Size = 4 - p.Spec.Replicated.ReplicasPerFailureDomain = 3 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) + t.Run("fail since ReplicasPerFailureDomain is not a power of 2", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 4 + p.Spec.Replicated.ReplicasPerFailureDomain = 3 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // fail since ReplicasPerFailureDomain is not a power of 2 - p.Spec.Replicated.Size = 4 - p.Spec.Replicated.ReplicasPerFailureDomain = 5 - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // Failure the sub domain does not exist - p.Spec.Replicated.SubFailureDomain = "dummy" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Error(t, err) - - // succeed with ec pool and valid compression mode - p = cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} - p.Spec.ErasureCoded.CodingChunks = 1 - p.Spec.ErasureCoded.DataChunks = 2 - p.Spec.CompressionMode = "passive" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) - assert.Nil(t, err) + t.Run("fail since ReplicasPerFailureDomain is not a power of 2", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Replicated.Size = 4 + p.Spec.Replicated.ReplicasPerFailureDomain = 5 + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) - // Add mirror test mode - { + t.Run("failure the sub domain does not exist", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: 
clusterInfo.Namespace}} + p.Spec.Replicated.SubFailureDomain = "dummy" + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.Error(t, err) + }) + + t.Run("succeed with ec pool and valid compression mode", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.ErasureCoded.CodingChunks = 1 + p.Spec.ErasureCoded.DataChunks = 2 + p.Spec.CompressionMode = "passive" + err := ValidatePool(context, clusterInfo, clusterSpec, &p) + assert.NoError(t, err) + }) + + t.Run("fail unrecognized mirroring mode", func(t *testing.T) { p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} p.Spec.Mirroring.Enabled = true p.Spec.Mirroring.Mode = "foo" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) + err := ValidatePool(context, clusterInfo, clusterSpec, &p) assert.Error(t, err) assert.EqualError(t, err, "unrecognized mirroring mode \"foo\". only 'image and 'pool' are supported") + }) - // Success mode is known + t.Run("success known mirroring mode", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Mirroring.Enabled = true p.Spec.Mirroring.Mode = "pool" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) + err := ValidatePool(context, clusterInfo, clusterSpec, &p) assert.NoError(t, err) + }) - // Error no interval specified + t.Run("fail mirroring mode no interval specified", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Mirroring.Enabled = true + p.Spec.Mirroring.Mode = "pool" p.Spec.Mirroring.SnapshotSchedules = []cephv1.SnapshotScheduleSpec{{StartTime: "14:00:00-05:00"}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) + err := ValidatePool(context, clusterInfo, clusterSpec, &p) assert.Error(t, err) assert.EqualError(t, err, "schedule interval cannot be empty if start time is specified") + }) - // Success we have an interval + t.Run("fail mirroring mode we have a snap interval", func(t *testing.T) { + p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} + p.Spec.Mirroring.Enabled = true + p.Spec.Mirroring.Mode = "pool" p.Spec.Mirroring.SnapshotSchedules = []cephv1.SnapshotScheduleSpec{{Interval: "24h"}} - err = ValidatePool(context, clusterInfo, clusterSpec, &p) + err := ValidatePool(context, clusterInfo, clusterSpec, &p) assert.NoError(t, err) - } + }) - // Failure and subfailure domains - { + t.Run("failure and subfailure domains", func(t *testing.T) { p := cephv1.CephBlockPool{ObjectMeta: metav1.ObjectMeta{Name: "mypool", Namespace: clusterInfo.Namespace}} p.Spec.FailureDomain = "host" p.Spec.Replicated.SubFailureDomain = "host" - err = ValidatePool(context, clusterInfo, clusterSpec, &p) + err := ValidatePool(context, clusterInfo, clusterSpec, &p) assert.Error(t, err) assert.EqualError(t, err, "failure and subfailure domain cannot be identical") - } - + }) } func TestValidateCrushProperties(t *testing.T) { diff --git a/tests/framework/installer/ceph_manifests.go b/tests/framework/installer/ceph_manifests.go index 5b405230cf89..5c51f303bef9 100644 --- a/tests/framework/installer/ceph_manifests.go +++ b/tests/framework/installer/ceph_manifests.go @@ -275,7 +275,8 @@ spec: size: ` + replicaSize + ` targetSizeRatio: .5 requireSafeReplicaSize: false - compressionMode: aggressive + parameters: + 
compression_mode: aggressive mirroring: enabled: true mode: image diff --git a/tests/framework/installer/ceph_manifests_v1.6.go b/tests/framework/installer/ceph_manifests_v1.6.go index 17fe28542c01..3e67bf898708 100644 --- a/tests/framework/installer/ceph_manifests_v1.6.go +++ b/tests/framework/installer/ceph_manifests_v1.6.go @@ -231,7 +231,8 @@ spec: size: ` + replicaSize + ` targetSizeRatio: .5 requireSafeReplicaSize: false - compressionMode: aggressive + parameters: + compression_mode: aggressive mirroring: enabled: true mode: image
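
To make the precedence problem described in the commit message concrete, the following is a minimal, self-contained Go sketch, not the Rook code itself: poolSpec and effectiveCompression are invented stand-ins for cephv1.PoolSpec and the compression handling in setCommonPoolProperties (which only copies the field when pool.IsCompressionEnabled() reports compression as enabled). It only illustrates why a kubebuilder default of "none" on CompressionMode silently clobbers Parameters["compression_mode"], and why removing the default lets Parameters take effect.

package main

import "fmt"

// poolSpec is a simplified stand-in for cephv1.PoolSpec, keeping only the two
// fields relevant to compression.
type poolSpec struct {
	CompressionMode string            // deprecated field
	Parameters      map[string]string // preferred: parameters["compression_mode"]
}

// effectiveCompression mimics the old behaviour described in the commit
// message: a non-empty CompressionMode (which the CRD default guaranteed) is
// copied into Parameters and therefore overrides whatever the user set there.
func effectiveCompression(p poolSpec) string {
	if p.Parameters == nil {
		p.Parameters = map[string]string{}
	}
	if p.CompressionMode != "" {
		p.Parameters["compression_mode"] = p.CompressionMode
	}
	return p.Parameters["compression_mode"]
}

func main() {
	// The user asks for "aggressive" via Parameters, but the removed
	// `+kubebuilder:default=none` marker silently set CompressionMode.
	before := poolSpec{
		CompressionMode: "none",
		Parameters:      map[string]string{"compression_mode": "aggressive"},
	}
	fmt.Println(effectiveCompression(before)) // prints "none": Parameters is ignored

	// With the default removed, CompressionMode stays empty unless set
	// explicitly, so the value in Parameters is honoured.
	after := poolSpec{
		Parameters: map[string]string{"compression_mode": "aggressive"},
	}
	fmt.Println(effectiveCompression(after)) // prints "aggressive"
}

With the default gone, a pool that configures compression only through parameters (as the updated test manifests above do with compression_mode: aggressive) is validated and applied as expected, while an explicitly set compressionMode still works but now triggers the deprecation warning added in validate.go.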