From 1dde701fab5c2bb801472cf8f77a8d02805a286f Mon Sep 17 00:00:00 2001
From: subhamkrai
Date: Mon, 2 Aug 2021 11:06:07 +0530
Subject: [PATCH] mds: create EC pool as secondary pool

When creating an EC filesystem, create a replicated pool as the
primary pool and the EC pool as a secondary pool; creating an EC
pool as the primary pool is not encouraged and will lead to failure.

Also, change the pool name in the storageclass-ec file.

Closes: https://github.com/rook/rook/issues/8210
Signed-off-by: subhamkrai
---
 Documentation/ceph-filesystem-crd.md                          | 5 +++--
 .../examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml  | 2 +-
 cluster/examples/kubernetes/ceph/filesystem-ec.yaml           | 2 ++
 3 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/Documentation/ceph-filesystem-crd.md b/Documentation/ceph-filesystem-crd.md
index 4b2b533cc05d2..03c512b6f684c 100644
--- a/Documentation/ceph-filesystem-crd.md
+++ b/Documentation/ceph-filesystem-crd.md
@@ -86,6 +86,8 @@ spec:
     replicated:
       size: 3
   dataPools:
+    - replicated:
+        size: 3
     - erasureCoded:
         dataChunks: 2
         codingChunks: 1
@@ -94,8 +96,7 @@ spec:
   activeStandby: true
 ```
 
-(These definitions can also be found in the [`filesystem-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/filesystem-ec.yaml) file.
-Also see an example in the [`storageclass-ec.yaml`](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml) for how to configure the volume.)
+**IMPORTANT**: For erasure coded pools, we have to create a replicated pool as the default data pool and an erasure-coded pool as a secondary pool.
 
 ### Mirroring
 
diff --git a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml b/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml
index 6c792812921a3..7f90d16fe0325 100644
--- a/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml
+++ b/cluster/examples/kubernetes/ceph/csi/cephfs/storageclass-ec.yaml
@@ -14,7 +14,7 @@ parameters:
 
   # Ceph pool into which the volume shall be created
   # Required for provisionVolume: "true"
-  pool: myfs-ec-data0
+  pool: myfs-ec-data1
 
   # The secrets contain Ceph admin credentials. These are generated automatically by the operator
   # in the same namespace as the cluster.
diff --git a/cluster/examples/kubernetes/ceph/filesystem-ec.yaml b/cluster/examples/kubernetes/ceph/filesystem-ec.yaml
index 205adfd13a253..7f693ff34142a 100644
--- a/cluster/examples/kubernetes/ceph/filesystem-ec.yaml
+++ b/cluster/examples/kubernetes/ceph/filesystem-ec.yaml
@@ -17,6 +17,8 @@ spec:
       size: 3
   # The list of data pool specs
   dataPools:
+    - replicated:
+        size: 3
     # You need at least three `bluestore` OSDs on different nodes for this config to work
     - erasureCoded:
         dataChunks: 2
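
For reference when applying this change, the sketch below condenses the two example manifests touched here into the shape they take after the patch. It assumes Rook's `<fsName>-data<index>` data-pool naming, which is why the StorageClass now points at `myfs-ec-data1` once the EC pool becomes the second `dataPools` entry; the CSI secret parameters from `storageclass-ec.yaml` are omitted for brevity.

```yaml
# Sketch only: condensed from filesystem-ec.yaml and storageclass-ec.yaml
# as modified by this patch. Pool names assume Rook's <fsName>-data<index>
# convention for filesystem data pools.
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs-ec
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: 3
  dataPools:
    - replicated:       # default (primary) data pool -> myfs-ec-data0
        size: 3
    - erasureCoded:     # secondary EC data pool -> myfs-ec-data1
        dataChunks: 2
        codingChunks: 1
  metadataServer:
    activeCount: 1
    activeStandby: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  clusterID: rook-ceph
  fsName: myfs-ec
  # Volumes land in the secondary EC pool, not the replicated default pool
  pool: myfs-ec-data1
  # ... CSI secret parameters as in storageclass-ec.yaml ...
reclaimPolicy: Delete
```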