diff --git a/Documentation/ceph-filesystem-crd.md b/Documentation/ceph-filesystem-crd.md
index f0a6a6d60ca37..44fbb5af7aeb4 100644
--- a/Documentation/ceph-filesystem-crd.md
+++ b/Documentation/ceph-filesystem-crd.md
@@ -86,6 +86,8 @@ spec:
     replicated:
       size: 3
   dataPools:
+    - replicated:
+        size: 3
     - erasureCoded:
         dataChunks: 2
         codingChunks: 1
diff --git a/deploy/examples/csi/cephfs/storageclass-ec.yaml b/deploy/examples/csi/cephfs/storageclass-ec.yaml
index 6c792812921a3..7f90d16fe0325 100644
--- a/deploy/examples/csi/cephfs/storageclass-ec.yaml
+++ b/deploy/examples/csi/cephfs/storageclass-ec.yaml
@@ -14,7 +14,7 @@ parameters:
 
   # Ceph pool into which the volume shall be created
   # Required for provisionVolume: "true"
-  pool: myfs-ec-data0
+  pool: myfs-ec-data1
 
   # The secrets contain Ceph admin credentials. These are generated automatically by the operator
   # in the same namespace as the cluster.
diff --git a/deploy/examples/filesystem-ec.yaml b/deploy/examples/filesystem-ec.yaml
index 205adfd13a253..7f693ff34142a 100644
--- a/deploy/examples/filesystem-ec.yaml
+++ b/deploy/examples/filesystem-ec.yaml
@@ -17,6 +17,8 @@ spec:
       size: 3
   # The list of data pool specs
  dataPools:
+    - replicated:
+        size: 3
    # You need at least three `bluestore` OSDs on different nodes for this config to work
    - erasureCoded:
        dataChunks: 2
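
The hunks above prepend a replicated pool to `dataPools` and bump the StorageClass `pool` parameter from `myfs-ec-data0` to `myfs-ec-data1`. A minimal sketch of the resulting `filesystem-ec.yaml` spec is shown below; the fields not visible in the diff (metadata name/namespace and the `metadataServer` block) are assumptions taken from the standard Rook example, and the pool-name comments reflect Rook's usual `<fsName>-data<index>` naming for unnamed data pools rather than anything stated in the diff itself.

```yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs-ec          # assumed from the upstream example
  namespace: rook-ceph   # assumed from the upstream example
spec:
  metadataPool:
    replicated:
      size: 3
  dataPools:
    # First (default) data pool: replicated. With Rook's default naming this
    # becomes myfs-ec-data0.
    - replicated:
        size: 3
    # Second data pool: erasure coded. With Rook's default naming this becomes
    # myfs-ec-data1, which is why storageclass-ec.yaml now points at it.
    # You need at least three `bluestore` OSDs on different nodes for this config to work
    - erasureCoded:
        dataChunks: 2
        codingChunks: 1
  metadataServer:        # assumed from the upstream example
    activeCount: 1
    activeStandby: true
```

Under these assumptions, provisioning through the EC StorageClass keeps writing file data to the erasure-coded pool (`pool: myfs-ec-data1`), while the new replicated pool serves as the filesystem's default data pool.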