file: allow creating CephFS data pools with predefined names
Add the ability to create CephFS data pools with predefined names.

Related-Issue: #9295
Signed-off-by: Denis Egorenko <degorenko@mirantis.com>
degorenko committed Dec 14, 2021
1 parent 9299e32 commit 3ff681e
Showing 17 changed files with 144 additions and 46 deletions.
15 changes: 10 additions & 5 deletions Documentation/ceph-filesystem-crd.md
@@ -31,7 +31,8 @@ spec:
replicated:
size: 3
dataPools:
- failureDomain: host
- name: replicated
failureDomain: host
replicated:
size: 3
preserveFilesystemOnDelete: true
@@ -86,9 +87,11 @@ spec:
replicated:
size: 3
dataPools:
- replicated:
- name: default
replicated:
size: 3
- erasureCoded:
- name: erasurecoded
erasureCoded:
dataChunks: 2
codingChunks: 1
metadataServer:
@@ -122,7 +125,8 @@ spec:
replicated:
size: 3
dataPools:
- failureDomain: host
- name: replicated
failureDomain: host
replicated:
size: 3
preserveFilesystemOnDelete: true
@@ -187,7 +191,8 @@ See the official cephfs mirror documentation on [how to add a bootstrap peer](ht
The pools allow all of the settings defined in the Pool CRD spec. For more details, see the [Pool CRD](ceph-pool-crd.md) settings. In the example above, there must be at least three hosts (size 3) and at least eight devices (6 data + 2 coding chunks) in the cluster.

* `metadataPool`: The settings used to create the filesystem metadata pool. Must use replication.
* `dataPools`: The settings to create the filesystem data pools. If multiple pools are specified, Rook will add the pools to the filesystem. Assigning users or files to a pool is left as an exercise for the reader with the [CephFS documentation](http://docs.ceph.com/docs/master/cephfs/file-layouts/). The data pools can use replication or erasure coding. If erasure coding pools are specified, the cluster must be running with bluestore enabled on the OSDs.
* `dataPools`: The settings to create the filesystem data pools. Optionally (and highly recommended), a pool name can be specified with the `name` field to override the default generated name; see more below. If multiple pools are specified, Rook will add the pools to the filesystem. Assigning users or files to a pool is left as an exercise for the reader; see the [CephFS documentation](http://docs.ceph.com/docs/master/cephfs/file-layouts/). The data pools can use replication or erasure coding. If erasure coding pools are specified, the cluster must be running with bluestore enabled on the OSDs.
* `name`: (optional, and highly recommended) Override the default generated name of the pool. The final pool name will consist of the filesystem name and the pool name, e.g., `<fsName>-<poolName>`; see the naming sketch below. We highly recommend specifying `name` to prevent issues that can arise from modifying the spec in a way that causes Rook to lose the original pool ordering.
* `preserveFilesystemOnDelete`: If it is set to 'true' the filesystem will remain when the
CephFilesystem resource is deleted. This is a security measure to avoid loss of data if the
CephFilesystem resource is deleted accidentally. The default value is 'false'. This option
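To make the naming rule above concrete, here is a minimal Go sketch (not part of this commit); `dataPoolName` is an illustrative helper, not a function in the Rook codebase:

```go
package main

import "fmt"

// dataPoolName illustrates the documented rule: without a predefined name the
// pool falls back to the legacy indexed form "<fsName>-data<i>"; with a name it
// becomes "<fsName>-<poolName>". Helper name and layout are assumptions.
func dataPoolName(fsName, poolName string, i int) string {
	if poolName == "" {
		return fmt.Sprintf("%s-data%d", fsName, i)
	}
	return fmt.Sprintf("%s-%s", fsName, poolName)
}

func main() {
	fmt.Println(dataPoolName("myfs", "", 0))                // myfs-data0 (old default)
	fmt.Println(dataPoolName("myfs", "replicated", 0))      // myfs-replicated
	fmt.Println(dataPoolName("myfs-ec", "erasurecoded", 1)) // myfs-ec-erasurecoded
}
```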
5 changes: 3 additions & 2 deletions Documentation/ceph-filesystem.md
@@ -36,7 +36,8 @@ spec:
replicated:
size: 3
dataPools:
- replicated:
- name: replicated
replicated:
size: 3
preserveFilesystemOnDelete: true
metadataServer:
@@ -98,7 +99,7 @@ parameters:

# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: myfs-data0
pool: myfs-replicated

# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
7 changes: 5 additions & 2 deletions deploy/charts/rook-ceph/templates/resources.yaml
@@ -4808,9 +4808,9 @@ spec:
description: FilesystemSpec represents the spec of a file system
properties:
dataPools:
description: The data pool settings
description: The data pool settings, with an optional predefined pool name.
items:
description: PoolSpec represents the spec of ceph pool
description: NamedPoolSpec represents the named ceph pool spec
properties:
compressionMode:
description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters'
@@ -4890,6 +4890,9 @@ spec:
type: object
type: array
type: object
name:
description: Name of the pool
type: string
parameters:
additionalProperties:
type: string
7 changes: 5 additions & 2 deletions deploy/examples/crds.yaml
@@ -4805,9 +4805,9 @@ spec:
description: FilesystemSpec represents the spec of a file system
properties:
dataPools:
description: The data pool settings
description: The data pool settings, with an optional predefined pool name.
items:
description: PoolSpec represents the spec of ceph pool
description: NamedPoolSpec represents the named ceph pool spec
properties:
compressionMode:
description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters'
@@ -4887,6 +4887,9 @@ spec:
type: object
type: array
type: object
name:
description: Name of the pool
type: string
parameters:
additionalProperties:
type: string
2 changes: 1 addition & 1 deletion deploy/examples/create-external-cluster-resources.py
@@ -83,7 +83,7 @@ def _init_cmd_output_map(self):
self.cmd_names['mgr services'] = '''{"format": "json", "prefix": "mgr services"}'''
# all the commands and their output
self.cmd_output_map[self.cmd_names['fs ls']
] = '''[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-data0"]}]'''
] = '''[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-replicated"]}]'''
self.cmd_output_map[self.cmd_names['quorum_status']] = '''{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}'''
self.cmd_output_map[self.cmd_names['mgr services']
] = '''{"dashboard":"https://ceph-dashboard:8443/","prometheus":"http://ceph-dashboard-db:9283/"}'''
3 changes: 1 addition & 2 deletions deploy/examples/csi/cephfs/storageclass-ec.yaml
@@ -14,10 +14,9 @@ parameters:

# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"

# For erasure coded pools, we have to create a replicated pool as the default data pool and an erasure-coded
# pool as a secondary pool.
pool: myfs-ec-data1
pool: myfs-ec-erasurecoded

# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
2 changes: 1 addition & 1 deletion deploy/examples/csi/cephfs/storageclass.yaml
@@ -14,7 +14,7 @@ parameters:

# Ceph pool into which the volume shall be created
# Required for provisionVolume: "true"
pool: myfs-data0
pool: myfs-replicated

# The secrets contain Ceph admin credentials. These are generated automatically by the operator
# in the same namespace as the cluster.
3 changes: 2 additions & 1 deletion deploy/examples/filesystem-ec.yaml
@@ -22,7 +22,8 @@ spec:
- replicated:
size: 3
# You need at least three `bluestore` OSDs on different nodes for this config to work
- erasureCoded:
- name: erasurecoded
erasureCoded:
dataChunks: 2
codingChunks: 1
# Inline compression mode for the data pool
3 changes: 2 additions & 1 deletion deploy/examples/filesystem-test.yaml
@@ -14,7 +14,8 @@ spec:
size: 1
requireSafeReplicaSize: false
dataPools:
- failureDomain: osd
- name: replicated
failureDomain: osd
replicated:
size: 1
requireSafeReplicaSize: false
3 changes: 2 additions & 1 deletion deploy/examples/filesystem.yaml
@@ -25,7 +25,8 @@ spec:
#target_size_ratio: ".5"
# The list of data pool specs. Can use replication or erasure coding.
dataPools:
- failureDomain: host
- name: replicated
failureDomain: host
replicated:
size: 3
# Disallow setting pool with replica 1, this could lead to data loss without recovery.
2 changes: 1 addition & 1 deletion deploy/examples/nfs-test.yaml
@@ -6,7 +6,7 @@ metadata:
spec:
# this setting is ignored for Ceph v16+
rados:
pool: myfs-data0
pool: myfs-replicated
namespace: nfs-ns
# Settings for the NFS server
server:
2 changes: 1 addition & 1 deletion deploy/examples/nfs.yaml
@@ -10,7 +10,7 @@ spec:
# In this example the data pool for the "myfs" filesystem is used.
# If using the object store example, the data pool would be "my-store.rgw.buckets.data".
# Note that this has nothing to do with where exported file systems or object stores live.
pool: myfs-data0
pool: myfs-replicated
# RADOS namespace where NFS client recovery data is stored in the pool.
namespace: nfs-ns

12 changes: 10 additions & 2 deletions pkg/apis/ceph.rook.io/v1/types.go
@@ -639,6 +639,14 @@ type PoolSpec struct {
Quotas QuotaSpec `json:"quotas,omitempty"`
}

// NamedPoolSpec represents the named ceph pool spec
type NamedPoolSpec struct {
// Name of the pool
Name string `json:"name,omitempty"`
// PoolSpec represents the spec of ceph pool
PoolSpec `json:",inline"`
}

// MirrorHealthCheckSpec represents the health specification of a Ceph Storage Pool mirror
type MirrorHealthCheckSpec struct {
// +optional
@@ -964,9 +972,9 @@ type FilesystemSpec struct {
// +nullable
MetadataPool PoolSpec `json:"metadataPool"`

// The data pool settings
// The data pool settings, with an optional predefined pool name.
// +nullable
DataPools []PoolSpec `json:"dataPools"`
DataPools []NamedPoolSpec `json:"dataPools"`

// Preserve pools on filesystem deletion
// +optional
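As a rough illustration of how the new type composes (a sketch only; the field names `FailureDomain`, `Replicated`, and `ReplicatedSpec` are taken from the existing Rook v1 `PoolSpec` API and are assumed unchanged), the inlined `PoolSpec` lets `name` sit next to the ordinary pool fields in the CR:

```go
package main

import (
	"fmt"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

func main() {
	// Because PoolSpec is embedded with `json:",inline"`, a dataPools entry like
	//   - name: replicated
	//     failureDomain: host
	//     replicated:
	//       size: 3
	// decodes into a single NamedPoolSpec value equivalent to this literal.
	spec := cephv1.FilesystemSpec{
		MetadataPool: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 3}},
		DataPools: []cephv1.NamedPoolSpec{
			{
				Name: "replicated",
				PoolSpec: cephv1.PoolSpec{
					FailureDomain: "host",
					Replicated:    cephv1.ReplicatedSpec{Size: 3},
				},
			},
		},
	}
	fmt.Printf("%d data pool(s), first named %q\n", len(spec.DataPools), spec.DataPools[0].Name)
}
```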
19 changes: 18 additions & 1 deletion pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

4 changes: 3 additions & 1 deletion pkg/operator/ceph/disruption/clusterdisruption/pools.go
@@ -57,7 +57,9 @@ func (r *ReconcileClusterDisruption) processPools(request reconcile.Request) (*c
poolCount += len(cephFilesystemList.Items)
for _, cephFilesystem := range cephFilesystemList.Items {
poolSpecs = append(poolSpecs, cephFilesystem.Spec.MetadataPool)
poolSpecs = append(poolSpecs, cephFilesystem.Spec.DataPools...)
for _, pool := range cephFilesystem.Spec.DataPools {
poolSpecs = append(poolSpecs, pool.PoolSpec)
}

}

16 changes: 11 additions & 5 deletions pkg/operator/ceph/file/filesystem.go
@@ -134,7 +134,7 @@ func validateFilesystem(context *clusterd.Context, clusterInfo *cephclient.Clust
return errors.Wrap(err, "invalid metadata pool")
}
for _, p := range f.Spec.DataPools {
localpoolSpec := p
localpoolSpec := p.PoolSpec
if err := pool.ValidatePoolSpec(context, clusterInfo, clusterSpec, &localpoolSpec); err != nil {
return errors.Wrap(err, "Invalid data pool")
}
@@ -163,7 +163,7 @@ func SetPoolSize(f *Filesystem, context *clusterd.Context, clusterInfo *cephclie
dataPoolNames := generateDataPoolNames(f, spec)
for i, pool := range spec.DataPools {
poolName := dataPoolNames[i]
err := cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool, "")
err := cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool.PoolSpec, "")
if err != nil {
return errors.Wrapf(err, "failed to update datapool %q", poolName)
}
@@ -243,7 +243,7 @@ func (f *Filesystem) doFilesystemCreate(context *clusterd.Context, clusterInfo *
for i, pool := range spec.DataPools {
poolName := dataPoolNames[i]
if _, poolFound := reversedPoolMap[poolName]; !poolFound {
err = cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool, "")
err = cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool.PoolSpec, "")
if err != nil {
return errors.Wrapf(err, "failed to create data pool %q", poolName)
}
@@ -278,10 +278,16 @@ func downFilesystem(context *clusterd.Context, clusterInfo *cephclient.ClusterIn
}

// generateDataPoolNames generates the data pool names by prefixing the filesystem name to the constant dataPoolSuffix plus the pool index,
// or, when a predefined name is set in the spec, to that name instead
func generateDataPoolNames(f *Filesystem, spec cephv1.FilesystemSpec) []string {
var dataPoolNames []string
for i := range spec.DataPools {
poolName := fmt.Sprintf("%s-%s%d", f.Name, dataPoolSuffix, i)
for i, pool := range spec.DataPools {
poolName := ""
if pool.Name == "" {
poolName = fmt.Sprintf("%s-%s%d", f.Name, dataPoolSuffix, i)
} else {
poolName = fmt.Sprintf("%s-%s", f.Name, pool.Name)
}
dataPoolNames = append(dataPoolNames, poolName)
}
return dataPoolNames
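For reference, a hypothetical example-style test (not included in this commit) showing the behaviour of `generateDataPoolNames`: entries without a `name` keep the old `<fsName>-data<index>` naming, while named entries become `<fsName>-<poolName>`. It is written as if it sat next to filesystem.go in the same package, reusing that file's existing `fmt` and `cephv1` imports and its `Filesystem` type (whose `Name` field is used above):

```go
// Hypothetical example test for package file; assumes the surrounding
// package already imports fmt and cephv1 as filesystem.go does.
func ExampleGenerateDataPoolNames() {
	fs := &Filesystem{Name: "myfs"}
	spec := cephv1.FilesystemSpec{
		DataPools: []cephv1.NamedPoolSpec{
			{},                   // no name -> legacy "myfs-data0"
			{Name: "replicated"}, // predefined name -> "myfs-replicated"
		},
	}
	fmt.Println(generateDataPoolNames(fs, spec))
	// Output: [myfs-data0 myfs-replicated]
}
```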
