file: allow to create CephFS data pools with predefined names
Add the ability to create CephFS data pools with predefined names.

Related-Issue: #9295
Signed-off-by: Denis Egorenko <degorenko@mirantis.com>
degorenko committed Dec 8, 2021
1 parent 122ff34 commit 0f35118
Showing 14 changed files with 120 additions and 48 deletions.
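In short, each entry under `dataPools` may now carry an optional `name`; when it is omitted, Rook keeps generating the usual `<filesystem>-data<index>` pool names (e.g. `myfs-data0`). A minimal sketch based on the example manifests updated below — the `metadata` and `metadataServer` values are illustrative, not taken from this diff:

```yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs             # illustrative filesystem name, matching the docs examples
  namespace: rook-ceph   # assumed namespace
spec:
  metadataPool:
    replicated:
      size: 3
  dataPools:
    - name: myfs-replicated-pool   # optional predefined pool name (new in this commit)
      failureDomain: host
      replicated:
        size: 3
  metadataServer:
    activeCount: 1
    activeStandby: true
```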
14 changes: 9 additions & 5 deletions Documentation/ceph-filesystem-crd.md
@@ -31,7 +31,8 @@ spec:
replicated:
size: 3
dataPools:
- failureDomain: host
- name: myfs-replicated-pool
failureDomain: host
replicated:
size: 3
preserveFilesystemOnDelete: true
@@ -86,9 +87,11 @@ spec:
replicated:
size: 3
dataPools:
- replicated:
- name: myfs-ec-default-pool
replicated:
size: 3
- erasureCoded:
- name: myfs-ec-pool
erasureCoded:
dataChunks: 2
codingChunks: 1
metadataServer:
@@ -122,7 +125,8 @@ spec:
replicated:
size: 3
dataPools:
- failureDomain: host
- name: myfs-replicated-pool
failureDomain: host
replicated:
size: 3
preserveFilesystemOnDelete: true
@@ -187,7 +191,7 @@ See the official cephfs mirror documentation on [how to add a bootstrap peer](ht
The pools allow all of the settings defined in the Pool CRD spec. For more details, see the [Pool CRD](ceph-pool-crd.md) settings. In the example above, there must be at least three hosts (size 3) and at least three devices (2 data + 1 coding chunks) in the cluster.

* `metadataPool`: The settings used to create the filesystem metadata pool. Must use replication.
* `dataPools`: The settings to create the filesystem data pools. If multiple pools are specified, Rook will add the pools to the filesystem. Assigning users or files to a pool is left as an exercise for the reader with the [CephFS documentation](http://docs.ceph.com/docs/master/cephfs/file-layouts/). The data pools can use replication or erasure coding. If erasure coding pools are specified, the cluster must be running with bluestore enabled on the OSDs.
* `dataPools`: The settings to create the filesystem data pools. Optionally, a pool name can be specified with the `name` field (see the sketch below). If multiple pools are specified, Rook will add the pools to the filesystem. Assigning users or files to a pool is left as an exercise for the reader with the [CephFS documentation](http://docs.ceph.com/docs/master/cephfs/file-layouts/). The data pools can use replication or erasure coding. If erasure coding pools are specified, the cluster must be running with bluestore enabled on the OSDs.
* `preserveFilesystemOnDelete`: If it is set to 'true' the filesystem will remain when the
CephFilesystem resource is deleted. This is a security measure to avoid loss of data if the
CephFilesystem resource is deleted accidentally. The default value is 'false'. This option
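A short sketch of the optional `name` field described above: the unnamed entry falls back to the generated `myfs-data0`, while the named entry is created verbatim (the pool layout mirrors the erasure-coded example in this commit):

```yaml
dataPools:
  - replicated:            # no name given: Rook generates "myfs-data0"
      size: 3
  - name: myfs-ec-pool     # predefined name is used as the Ceph pool name
    erasureCoded:
      dataChunks: 2
      codingChunks: 1
```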
5 changes: 3 additions & 2 deletions Documentation/ceph-filesystem.md
@@ -36,7 +36,8 @@ spec:
replicated:
size: 3
dataPools:
- replicated:
- name: myfs-replicated-pool
replicated:
size: 3
preserveFilesystemOnDelete: true
metadataServer:
@@ -98,7 +99,7 @@ parameters:

# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: myfs-data0
pool: myfs-replicated-pool

# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
9 changes: 5 additions & 4 deletions deploy/charts/rook-ceph/templates/resources.yaml
@@ -4808,9 +4808,9 @@ spec:
description: FilesystemSpec represents the spec of a file system
properties:
dataPools:
description: The data pool settings
description: The data pool settings, with optional predefined pool name.
items:
description: PoolSpec represents the spec of ceph pool
description: NamedPoolSpec represents the named ceph pool spec
properties:
compressionMode:
description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters'
@@ -4890,6 +4890,9 @@ spec:
type: object
type: array
type: object
name:
description: Name of pool
type: string
parameters:
additionalProperties:
type: string
@@ -4971,7 +4974,6 @@ spec:
type: object
x-kubernetes-preserve-unknown-fields: true
type: object
nullable: true
type: array
metadataPool:
description: The metadata pool settings
@@ -5800,7 +5802,6 @@ spec:
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- dataPools
- metadataPool
- metadataServer
type: object
9 changes: 5 additions & 4 deletions deploy/examples/crds.yaml
@@ -4805,9 +4805,9 @@ spec:
description: FilesystemSpec represents the spec of a file system
properties:
dataPools:
description: The data pool settings
description: The data pool settings, with optional predefined pool name.
items:
description: PoolSpec represents the spec of ceph pool
description: NamedPoolSpec represents the named ceph pool spec
properties:
compressionMode:
description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters'
@@ -4887,6 +4887,9 @@ spec:
type: object
type: array
type: object
name:
description: Name of pool
type: string
parameters:
additionalProperties:
type: string
@@ -4968,7 +4971,6 @@ spec:
type: object
x-kubernetes-preserve-unknown-fields: true
type: object
nullable: true
type: array
metadataPool:
description: The metadata pool settings
@@ -5797,7 +5799,6 @@ spec:
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- dataPools
- metadataPool
- metadataServer
type: object
3 changes: 1 addition & 2 deletions deploy/examples/csi/cephfs/storageclass-ec.yaml
@@ -14,10 +14,9 @@ parameters:

# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"

# For erasure coded pools, we have to create a replicated pool as the default data pool and an erasure-coded
# pool as a secondary pool.
pool: myfs-ec-data1
pool: myfs-ec-pool

# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
2 changes: 1 addition & 1 deletion deploy/examples/csi/cephfs/storageclass.yaml
@@ -14,7 +14,7 @@ parameters:

# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: myfs-data0
pool: myfs-replicated-pool

# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
3 changes: 2 additions & 1 deletion deploy/examples/filesystem-ec.yaml
@@ -22,7 +22,8 @@ spec:
- replicated:
size: 3
# You need at least three `bluestore` OSDs on different nodes for this config to work
- erasureCoded:
- name: myfs-ec-pool
erasureCoded:
dataChunks: 2
codingChunks: 1
# Inline compression mode for the data pool
3 changes: 2 additions & 1 deletion deploy/examples/filesystem-test.yaml
@@ -14,7 +14,8 @@ spec:
size: 1
requireSafeReplicaSize: false
dataPools:
- failureDomain: osd
- name: myfs-replicated-pool
failureDomain: osd
replicated:
size: 1
requireSafeReplicaSize: false
3 changes: 2 additions & 1 deletion deploy/examples/filesystem.yaml
@@ -25,7 +25,8 @@ spec:
#target_size_ratio: ".5"
# The list of data pool specs. Can use replication or erasure coding.
dataPools:
- failureDomain: host
- name: myfs-replicated-pool
failureDomain: host
replicated:
size: 3
# Disallow setting pool with replica 1, this could lead to data loss without recovery.
14 changes: 11 additions & 3 deletions pkg/apis/ceph.rook.io/v1/types.go
@@ -639,6 +639,14 @@ type PoolSpec struct {
Quotas QuotaSpec `json:"quotas,omitempty"`
}

// NamedPoolSpec represents the named ceph pool spec
type NamedPoolSpec struct {
// Name of pool
Name string `json:"name,omitempty"`
// PoolSpec represents the spec of ceph pool
PoolSpec `json:",inline"`
}

// MirrorHealthCheckSpec represents the health specification of a Ceph Storage Pool mirror
type MirrorHealthCheckSpec struct {
// +optional
@@ -964,9 +972,9 @@ type FilesystemSpec struct {
// +nullable
MetadataPool PoolSpec `json:"metadataPool"`

// The data pool settings
// +nullable
DataPools []PoolSpec `json:"dataPools"`
// The data pool settings, with optional predefined pool name.
// +optional
DataPools []NamedPoolSpec `json:"dataPools"`

// Preserve pools on filesystem deletion
// +optional
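Since `PoolSpec` is embedded with `json:",inline"`, the usual pool settings sit at the same YAML level as the new `name` key rather than under a nested object — a sketch of the mapping (values are illustrative):

```yaml
dataPools:
  - name: myfs-replicated-pool   # NamedPoolSpec.Name
    failureDomain: host          # inlined PoolSpec fields follow at the same level
    replicated:
      size: 3
```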
19 changes: 18 additions & 1 deletion pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go

(Generated file; diff not rendered.)

4 changes: 3 additions & 1 deletion pkg/operator/ceph/disruption/clusterdisruption/pools.go
@@ -57,7 +57,9 @@ func (r *ReconcileClusterDisruption) processPools(request reconcile.Request) (*c
poolCount += len(cephFilesystemList.Items)
for _, cephFilesystem := range cephFilesystemList.Items {
poolSpecs = append(poolSpecs, cephFilesystem.Spec.MetadataPool)
poolSpecs = append(poolSpecs, cephFilesystem.Spec.DataPools...)
for _, pool := range cephFilesystem.Spec.DataPools {
poolSpecs = append(poolSpecs, pool.PoolSpec)
}

}

14 changes: 9 additions & 5 deletions pkg/operator/ceph/file/filesystem.go
@@ -134,7 +134,7 @@ func validateFilesystem(context *clusterd.Context, clusterInfo *cephclient.Clust
return errors.Wrap(err, "invalid metadata pool")
}
for _, p := range f.Spec.DataPools {
localpoolSpec := p
localpoolSpec := p.PoolSpec
if err := pool.ValidatePoolSpec(context, clusterInfo, clusterSpec, &localpoolSpec); err != nil {
return errors.Wrap(err, "Invalid data pool")
}
@@ -163,7 +163,7 @@ func SetPoolSize(f *Filesystem, context *clusterd.Context, clusterInfo *cephclie
dataPoolNames := generateDataPoolNames(f, spec)
for i, pool := range spec.DataPools {
poolName := dataPoolNames[i]
err := cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool, "")
err := cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool.PoolSpec, "")
if err != nil {
return errors.Wrapf(err, "failed to update datapool %q", poolName)
}
@@ -243,7 +243,7 @@ func (f *Filesystem) doFilesystemCreate(context *clusterd.Context, clusterInfo *
for i, pool := range spec.DataPools {
poolName := dataPoolNames[i]
if _, poolFound := reversedPoolMap[poolName]; !poolFound {
err = cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool, "")
err = cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool.PoolSpec, "")
if err != nil {
return errors.Wrapf(err, "failed to create data pool %q", poolName)
}
@@ -278,10 +278,14 @@ func downFilesystem(context *clusterd.Context, clusterInfo *cephclient.ClusterIn
}

// generateDataPoolNames generates the data pool names by combining the filesystem name, the constant dataPoolSuffix, and the pool index,
// or uses the predefined pool name from the spec when one is set
func generateDataPoolNames(f *Filesystem, spec cephv1.FilesystemSpec) []string {
var dataPoolNames []string
for i := range spec.DataPools {
poolName := fmt.Sprintf("%s-%s%d", f.Name, dataPoolSuffix, i)
for i, pool := range spec.DataPools {
poolName := pool.Name
if poolName == "" {
poolName = fmt.Sprintf("%s-%s%d", f.Name, dataPoolSuffix, i)
}
dataPoolNames = append(dataPoolNames, poolName)
}
return dataPoolNames
