From d43cfdd68846316c741a28b3f29cbec65484348e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Tue, 26 Oct 2021 18:32:48 +0200
Subject: [PATCH] nfs: add pool setting CR option
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The NFS spec now supports the CephBlockPool spec, which means that it can
take advantage of all the known settings like compression, size, failure
domain, etc.

Closes: https://github.com/rook/rook/issues/9034
Signed-off-by: Sébastien Han
---
 Documentation/ceph-nfs-crd.md                 |   2 +-
 .../charts/rook-ceph/templates/resources.yaml | 168 +++++++++++++++++-
 cluster/examples/kubernetes/ceph/crds.yaml    | 168 +++++++++++++++++-
 .../examples/kubernetes/ceph/nfs-test.yaml    |   8 +-
 cluster/examples/kubernetes/ceph/nfs.yaml     |   8 +-
 pkg/apis/ceph.rook.io/v1/types.go             |  12 +-
 .../ceph.rook.io/v1/zz_generated.deepcopy.go  |   7 +-
 pkg/operator/ceph/nfs/controller.go           |   2 +-
 pkg/operator/ceph/nfs/controller_test.go      | 130 --------------
 pkg/operator/ceph/nfs/nfs.go                  |  23 +--
 tests/scripts/validate_cluster.sh             |  16 +-
 11 files changed, 383 insertions(+), 161 deletions(-)

diff --git a/Documentation/ceph-nfs-crd.md b/Documentation/ceph-nfs-crd.md
index 527db58462195..b613169b4636e 100644
--- a/Documentation/ceph-nfs-crd.md
+++ b/Documentation/ceph-nfs-crd.md
@@ -90,7 +90,7 @@ ceph dashboard set-ganesha-clusters-rados-pool-namespace :
 **NOTE**: The RADOS settings aren't used in Ceph versions equal to or greater
 than Pacific 16.2.7, default values are used instead ".nfs" for the RADOS pool
 and the CephNFS CR's name for the RADOS namespace. However, RADOS settings are
 mandatory for versions preceding Pacific 16.2.7.
diff --git a/cluster/charts/rook-ceph/templates/resources.yaml b/cluster/charts/rook-ceph/templates/resources.yaml
index 11ecd69664fbb..6e948e92446dc 100644
--- a/cluster/charts/rook-ceph/templates/resources.yaml
+++ b/cluster/charts/rook-ceph/templates/resources.yaml
@@ -5738,11 +5738,175 @@ spec:
               description: Namespace is the RADOS namespace where NFS client recovery data is stored.
               type: string
             pool:
-              description: Pool is the RADOS pool where NFS client recovery data is stored.
+              description: Pool used to represent the Ganesha's pool name in version older than 16.2.7 As of Ceph Pacific 16.2.7, NFS Ganesha's pool name is hardcoded to ".nfs", so this setting will be ignored.
               type: string
+            poolSettings:
+              description: PoolSettings is the RADOS pool where Ganesha data is stored.
+ nullable: true + properties: + compressionMode: + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object required: - namespace - - pool type: object server: description: Server is the Ganesha Server specification diff --git a/cluster/examples/kubernetes/ceph/crds.yaml b/cluster/examples/kubernetes/ceph/crds.yaml index cced62ad2bce2..4f209c29d2b78 100644 --- a/cluster/examples/kubernetes/ceph/crds.yaml +++ b/cluster/examples/kubernetes/ceph/crds.yaml @@ -5736,11 +5736,175 @@ spec: description: Namespace is the RADOS namespace where NFS client recovery data is stored. type: string pool: - description: Pool is the RADOS pool where NFS client recovery data is stored. + description: Pool used to represent the Ganesha's pool name in version older than 16.2.7 As of Ceph Pacific 16.2.7, NFS Ganesha's pool name is hardcoded to ".nfs", so this setting will be ignored. 
type: string + poolSettings: + description: PoolSettings is the RADOS pool where Ganesha data is stored. + nullable: true + properties: + compressionMode: + description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type) + maximum: 9 + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or zone if available) - technically also any type in the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes Secret names to add rbd-mirror or cephfs-mirror peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high performance tier (for example SSD or NVME) for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low performance tier (for example HDDs) for remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in a replicated storage pool, including the object itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second or minute for the health check to run like 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object required: - namespace - - pool type: object server: description: Server is the Ganesha Server specification diff --git a/cluster/examples/kubernetes/ceph/nfs-test.yaml b/cluster/examples/kubernetes/ceph/nfs-test.yaml index 4d8ee6966053c..fbc7bed4bd361 100644 --- a/cluster/examples/kubernetes/ceph/nfs-test.yaml +++ b/cluster/examples/kubernetes/ceph/nfs-test.yaml @@ -6,10 +6,10 @@ metadata: spec: # rados settings aren't necessary in Ceph Versions equal to or greater than Pacific 16.2.7 rados: - # RADOS pool where NFS client recovery data is stored. - # In this example the data pool for the "myfs" filesystem is used. - # If using the object store example, the data pool would be "my-store.rgw.buckets.data". 
- pool: myfs-data0 + poolSettings: + failureDomain: osd + replicated: + size: 1 # RADOS namespace where NFS client recovery data is stored in the pool. namespace: nfs-ns # Settings for the NFS server diff --git a/cluster/examples/kubernetes/ceph/nfs.yaml b/cluster/examples/kubernetes/ceph/nfs.yaml index 86c99a2c53d2d..c9add20e87d00 100644 --- a/cluster/examples/kubernetes/ceph/nfs.yaml +++ b/cluster/examples/kubernetes/ceph/nfs.yaml @@ -5,11 +5,9 @@ metadata: namespace: rook-ceph # namespace:cluster spec: rados: - # RADOS pool where NFS client recovery data is stored, must be a replica pool. EC pools don't support omap which is required by ganesha. - # In this example the data pool for the "myfs" filesystem is used. Separate pool for storing ganesha recovery data is recommended. - # Due to this dashboard issue https://tracker.ceph.com/issues/46176. - # If using the object store example, the data pool would be "my-store.rgw.buckets.data". - pool: myfs-data0 + # The Ganesha pool spec. Must use replication. + poolSettings: + size: 3 # RADOS namespace where NFS client recovery data is stored in the pool. namespace: nfs-ns # Settings for the NFS server diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go index 3b92d4c3191f8..2a1368c1321dc 100755 --- a/pkg/apis/ceph.rook.io/v1/types.go +++ b/pkg/apis/ceph.rook.io/v1/types.go @@ -1630,8 +1630,16 @@ type NFSGaneshaSpec struct { // GaneshaRADOSSpec represents the specification of a Ganesha RADOS object type GaneshaRADOSSpec struct { - // Pool is the RADOS pool where NFS client recovery data is stored. - Pool string `json:"pool"` + // Pool used to represent the Ganesha's pool name in version older than 16.2.7 + // As of Ceph Pacific 16.2.7, NFS Ganesha's pool name is hardcoded to ".nfs", so this + // setting will be ignored. + // +optional + Pool string `json:"pool,omitempty"` + + // PoolSettings is the RADOS pool where Ganesha data is stored. + // +nullable + // +optional + PoolSettings *PoolSpec `json:"poolSettings,omitempty"` // Namespace is the RADOS namespace where NFS client recovery data is stored. Namespace string `json:"namespace"` diff --git a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go index 5ad4e77bf558b..6fbbb522d9342 100644 --- a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go +++ b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go @@ -1771,6 +1771,11 @@ func (in *FilesystemsSpec) DeepCopy() *FilesystemsSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GaneshaRADOSSpec) DeepCopyInto(out *GaneshaRADOSSpec) { *out = *in + if in.PoolSettings != nil { + in, out := &in.PoolSettings, &out.PoolSettings + *out = new(PoolSpec) + (*in).DeepCopyInto(*out) + } return } @@ -2213,7 +2218,7 @@ func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NFSGaneshaSpec) DeepCopyInto(out *NFSGaneshaSpec) { *out = *in - out.RADOS = in.RADOS + in.RADOS.DeepCopyInto(&out.RADOS) in.Server.DeepCopyInto(&out.Server) return } diff --git a/pkg/operator/ceph/nfs/controller.go b/pkg/operator/ceph/nfs/controller.go index dd949c9053ee9..b2630cb20d38b 100644 --- a/pkg/operator/ceph/nfs/controller.go +++ b/pkg/operator/ceph/nfs/controller.go @@ -269,7 +269,7 @@ func (r *ReconcileCephNFS) reconcile(request reconcile.Request) (reconcile.Resul if err := validateGanesha(r.context, r.clusterInfo, cephNFS); err != nil { return reconcile.Result{}, errors.Wrapf(err, "invalid ceph nfs %q arguments", cephNFS.Name) } - if err := fetchOrCreatePool(r.context, r.clusterInfo, cephNFS); err != nil { + if err := r.fetchOrCreatePool(cephNFS); err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to fetch or create RADOS pool") } diff --git a/pkg/operator/ceph/nfs/controller_test.go b/pkg/operator/ceph/nfs/controller_test.go index d1e470a48bf83..9dd6e5dd240e7 100644 --- a/pkg/operator/ceph/nfs/controller_test.go +++ b/pkg/operator/ceph/nfs/controller_test.go @@ -29,9 +29,7 @@ import ( "github.com/rook/rook/pkg/client/clientset/versioned/scheme" "github.com/rook/rook/pkg/clusterd" "github.com/rook/rook/pkg/daemon/ceph/client" - "github.com/rook/rook/pkg/operator/ceph/cluster/mon" "github.com/rook/rook/pkg/operator/ceph/version" - cephver "github.com/rook/rook/pkg/operator/ceph/version" "github.com/rook/rook/pkg/operator/k8sutil" "github.com/rook/rook/pkg/operator/test" exectest "github.com/rook/rook/pkg/util/exec/test" @@ -259,131 +257,3 @@ func TestGetGaneshaConfigObject(t *testing.T) { logger.Infof("Config Object for Octopus is %s", res) assert.Equal(t, expectedName, res) } - -func TestFetchOrCreatePool(t *testing.T) { - ctx := context.TODO() - cephNFS := &cephv1.CephNFS{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: cephv1.NFSGaneshaSpec{ - Server: cephv1.GaneshaServerSpec{ - Active: 1, - }, - }, - TypeMeta: controllerTypeMeta, - } - executor := &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - return "", nil - }, - } - clientset := test.New(t, 3) - c := &clusterd.Context{ - Executor: executor, - RookClientset: rookclient.NewSimpleClientset(), - Clientset: clientset, - } - // Mock clusterInfo - secrets := map[string][]byte{ - "fsid": []byte(name), - "mon-secret": []byte("monsecret"), - "admin-secret": []byte("adminsecret"), - } - secret := &v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook-ceph-mon", - Namespace: namespace, - }, - Data: secrets, - Type: k8sutil.RookType, - } - _, err := c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) - assert.NoError(t, err) - clusterInfo, _, _, err := mon.LoadClusterInfo(c, ctx, namespace) - if err != nil { - return - } - - err = fetchOrCreatePool(c, clusterInfo, cephNFS) - assert.NoError(t, err) - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[1] == "pool" && args[2] == "get" { - return "Error", errors.New("failed to get pool") - } - return "", nil - }, - } - - c.Executor = executor - err = fetchOrCreatePool(c, clusterInfo, cephNFS) - assert.Error(t, err) - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[1] == "pool" && args[2] == "get" { - return "Error", errors.New("failed to get pool: unrecognized 
pool") - } - return "", nil - }, - } - - c.Executor = executor - err = fetchOrCreatePool(c, clusterInfo, cephNFS) - assert.Error(t, err) - - clusterInfo.CephVersion = cephver.CephVersion{ - Major: 16, - Minor: 2, - Extra: 6, - } - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[1] == "pool" && args[2] == "get" { - return "Error", errors.New("failed to get pool: unrecognized pool") - } - return "", nil - }, - } - - c.Executor = executor - err = fetchOrCreatePool(c, clusterInfo, cephNFS) - assert.NoError(t, err) - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[1] == "pool" && args[2] == "get" { - return "Error", errors.New("failed to get pool: unrecognized pool") - } - if args[1] == "pool" && args[2] == "create" { - return "Error", errors.New("creating pool failed") - } - return "", nil - }, - } - - c.Executor = executor - err = fetchOrCreatePool(c, clusterInfo, cephNFS) - assert.Error(t, err) - - executor = &exectest.MockExecutor{ - MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { - if args[1] == "pool" && args[2] == "get" { - return "Error", errors.New("unrecognized pool") - } - if args[1] == "pool" && args[2] == "application" { - return "Error", errors.New("enabling pool failed") - } - return "", nil - }, - } - - c.Executor = executor - err = fetchOrCreatePool(c, clusterInfo, cephNFS) - assert.Error(t, err) - -} diff --git a/pkg/operator/ceph/nfs/nfs.go b/pkg/operator/ceph/nfs/nfs.go index 644aca1396fef..207a03c1b67ee 100644 --- a/pkg/operator/ceph/nfs/nfs.go +++ b/pkg/operator/ceph/nfs/nfs.go @@ -282,26 +282,27 @@ func validateGanesha(context *clusterd.Context, clusterInfo *cephclient.ClusterI } // create and enable default RADOS pool -func createDefaultNFSRADOSPool(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, defaultRadosPoolName string) error { - args := []string{"osd", "pool", "create", defaultRadosPoolName} - _, err := cephclient.NewCephCommand(context, clusterInfo, args).Run() - if err != nil { - return err +func (r *ReconcileCephNFS) createDefaultNFSRADOSPool(n *cephv1.CephNFS) error { + poolName := n.Spec.RADOS.Pool + // Settings are not always declared and CreateReplicatedPoolForApp does not accept a pointer for + // the pool spec + if n.Spec.RADOS.PoolSettings == nil { + n.Spec.RADOS.PoolSettings = &cephv1.PoolSpec{} } - args = []string{"osd", "pool", "application", "enable", defaultRadosPoolName, "nfs"} - _, err = cephclient.NewCephCommand(context, clusterInfo, args).Run() + err := cephclient.CreateReplicatedPoolForApp(r.context, r.clusterInfo, r.cephClusterSpec, poolName, *n.Spec.RADOS.PoolSettings, cephclient.DefaultPGCount, "nfs") if err != nil { return err } + return nil } -func fetchOrCreatePool(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, n *cephv1.CephNFS) error { +func (r *ReconcileCephNFS) fetchOrCreatePool(n *cephv1.CephNFS) error { // The existence of the pool provided in n.Spec.RADOS.Pool is necessary otherwise addRADOSConfigFile() will fail - _, err := cephclient.GetPoolDetails(context, clusterInfo, n.Spec.RADOS.Pool) + _, err := cephclient.GetPoolDetails(r.context, r.clusterInfo, n.Spec.RADOS.Pool) if err != nil { - if strings.Contains(err.Error(), "unrecognized pool") && clusterInfo.CephVersion.IsAtLeastPacific() { - err := createDefaultNFSRADOSPool(context, clusterInfo, n.Spec.RADOS.Pool) + if 
strings.Contains(err.Error(), "unrecognized pool") && r.clusterInfo.CephVersion.IsAtLeastPacific() { + err := r.createDefaultNFSRADOSPool(n) if err != nil { return errors.Wrapf(err, "failed to find %q pool and unable to create it", n.Spec.RADOS.Pool) } diff --git a/tests/scripts/validate_cluster.sh b/tests/scripts/validate_cluster.sh index 37c7074751f3b..4695154391462 100755 --- a/tests/scripts/validate_cluster.sh +++ b/tests/scripts/validate_cluster.sh @@ -88,7 +88,7 @@ function test_demo_pool { } function test_csi { - timeout 180 bash <<-'EOF' + timeout 360 bash <<-'EOF' until [[ "$(kubectl -n rook-ceph get pods --field-selector=status.phase=Running|grep -c ^csi-)" -eq 4 ]]; do echo "waiting for csi pods to be ready" sleep 5 @@ -96,6 +96,15 @@ function test_csi { EOF } +function test_nfs { + timeout 360 bash <<-'EOF' + until [[ "$(kubectl -n rook-ceph get pods --field-selector=status.phase=Running|grep -c ^rook-ceph-nfs-)" -eq 1 ]]; do + echo "waiting for nfs pods to be ready" + sleep 5 + done +EOF +} + ######## # MAIN # ######## @@ -104,7 +113,7 @@ test_demo_mon test_demo_mgr if [[ "$DAEMON_TO_VALIDATE" == "all" ]]; then - daemons_list="osd mds rgw rbd_mirror fs_mirror" + daemons_list="osd mds rgw rbd_mirror fs_mirror nfs" else # change commas to space comma_to_space=${DAEMON_TO_VALIDATE//,/ } @@ -139,6 +148,9 @@ for daemon in $daemons_list; do fs_mirror) test_demo_fs_mirror ;; + nfs) + test_nfs + ;; *) log "ERROR: unknown daemon to validate!" log "Available daemon are: mon mgr osd mds rgw rbd_mirror fs_mirror"
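For reference, a CephNFS manifest exercising the new poolSettings block might look like the sketch below. This is illustrative only: the metadata name, the failure domain, and the compression parameter are assumptions and are not taken from this patch, while the schema fields themselves (failureDomain, replicated.size, requireSafeReplicaSize, parameters) come from the CRD definitions above.

apiVersion: ceph.rook.io/v1
kind: CephNFS
metadata:
  name: my-nfs            # illustrative name
  namespace: rook-ceph
spec:
  rados:
    # RADOS namespace where NFS client recovery data is stored in the pool.
    namespace: nfs-ns
    # poolSettings follows the CephBlockPool PoolSpec; Ganesha needs a
    # replicated pool because EC pools do not support the omap data it requires.
    poolSettings:
      failureDomain: host
      replicated:
        size: 3
        requireSafeReplicaSize: true
      parameters:
        # optional inline compression on the Ganesha pool (illustrative)
        compression_mode: none
  server:
    active: 1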