Skip to content

Commit

Permalink
ceph: remove RADOS options from CephNFS and use .nfs pool
Browse files Browse the repository at this point in the history
The NFS Module in Ceph has been changed to use only the .nfs RADOS
pool to manage NFS exports. This commit matches that change in Rook
by removing the RADOS option from the CephNFS custom resource and using
the .nfs RADOS pool with the CephNFS object's name as the RADOS namespace.

Closes: #8450
Signed-off-by: Joseph Sawaya <jsawaya@redhat.com>
  • Loading branch information
Joseph Sawaya committed Aug 5, 2021
1 parent cc16b60 commit 4335b6e
Show file tree
Hide file tree
Showing 14 changed files with 40 additions and 124 deletions.
18 changes: 0 additions & 18 deletions Documentation/ceph-nfs-crd.md
Expand Up @@ -25,15 +25,6 @@ metadata:
name: my-nfs
namespace: rook-ceph
spec:
rados:
# RADOS pool where NFS client recovery data and per-daemon configs are
# stored. In this example the data pool for the "myfs" filesystem is used.
# If using the object store example, the data pool would be
# "my-store.rgw.buckets.data". Note that this has nothing to do with where
# exported CephFS' or objectstores live.
pool: myfs-data0
# RADOS namespace where NFS client recovery data is stored in the pool.
namespace: nfs-ns
# Settings for the NFS server
server:
# the number of active NFS servers
Expand Down Expand Up @@ -84,15 +75,6 @@ ceph dashboard set-ganesha-clusters-rados-pool-namespace <ganesha_pool_name>[/<g
ceph dashboard set-ganesha-clusters-rados-pool-namespace <cluster_id>:<pool_name>[/<namespace>](,<cluster_id>:<pool_name>[/<namespace>])*
```

## NFS Settings

### RADOS Settings

* `pool`: The pool where ganesha recovery backend and supplemental configuration objects will be stored
* `namespace`: The namespace in `pool` where ganesha recovery backend and supplemental configuration objects will be stored

> **NOTE**: Don't use EC pools for NFS because ganesha uses omap in the recovery objects and grace db. EC pools do not support omap.
## EXPORT Block Configuration

All daemons within a cluster will share configuration with no exports defined, and that includes a RADOS object via:
Expand Down
14 changes: 0 additions & 14 deletions cluster/charts/rook-ceph/templates/resources.yaml
Expand Up @@ -5652,19 +5652,6 @@ spec:
spec:
description: NFSGaneshaSpec represents the spec of an nfs ganesha server
properties:
rados:
description: RADOS is the Ganesha RADOS specification
properties:
namespace:
description: Namespace is the RADOS namespace where NFS client recovery data is stored.
type: string
pool:
description: Pool is the RADOS pool where NFS client recovery data is stored.
type: string
required:
- namespace
- pool
type: object
server:
description: Server is the Ganesha Server specification
properties:
Expand Down Expand Up @@ -6256,7 +6243,6 @@ spec:
- active
type: object
required:
- rados
- server
type: object
status:
Expand Down
14 changes: 0 additions & 14 deletions cluster/examples/kubernetes/ceph/crds.yaml
Expand Up @@ -5649,19 +5649,6 @@ spec:
spec:
description: NFSGaneshaSpec represents the spec of an nfs ganesha server
properties:
rados:
description: RADOS is the Ganesha RADOS specification
properties:
namespace:
description: Namespace is the RADOS namespace where NFS client recovery data is stored.
type: string
pool:
description: Pool is the RADOS pool where NFS client recovery data is stored.
type: string
required:
- namespace
- pool
type: object
server:
description: Server is the Ganesha Server specification
properties:
Expand Down Expand Up @@ -6253,7 +6240,6 @@ spec:
- active
type: object
required:
- rados
- server
type: object
status:
Expand Down
7 changes: 0 additions & 7 deletions cluster/examples/kubernetes/ceph/nfs-test.yaml
Expand Up @@ -4,13 +4,6 @@ metadata:
name: my-nfs
namespace: rook-ceph # namespace:cluster
spec:
rados:
# RADOS pool where NFS client recovery data is stored.
# In this example the data pool for the "myfs" filesystem is used.
# If using the object store example, the data pool would be "my-store.rgw.buckets.data".
pool: myfs-data0
# RADOS namespace where NFS client recovery data is stored in the pool.
namespace: nfs-ns
# Settings for the NFS server
server:
# the number of active NFS servers
Expand Down
11 changes: 0 additions & 11 deletions pkg/apis/ceph.rook.io/v1/types.go
Expand Up @@ -1564,22 +1564,11 @@ type CephNFSList struct {

// NFSGaneshaSpec represents the spec of an nfs ganesha server
type NFSGaneshaSpec struct {
// RADOS is the Ganesha RADOS specification
RADOS GaneshaRADOSSpec `json:"rados"`

// Server is the Ganesha Server specification
Server GaneshaServerSpec `json:"server"`
}

// GaneshaRADOSSpec represents the specification of a Ganesha RADOS object.
//
// Deprecated: CephNFS now always stores recovery data in the built-in ".nfs"
// RADOS pool, using the CephNFS object's name as the RADOS namespace, so this
// spec is no longer referenced by NFSGaneshaSpec.
type GaneshaRADOSSpec struct {
	// Pool is the RADOS pool where NFS client recovery data is stored.
	Pool string `json:"pool"`

	// Namespace is the RADOS namespace where NFS client recovery data is stored.
	Namespace string `json:"namespace"`
}

// GaneshaServerSpec represents the specification of a Ganesha Server
type GaneshaServerSpec struct {
// The number of active Ganesha servers
Expand Down
17 changes: 0 additions & 17 deletions pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

16 changes: 8 additions & 8 deletions pkg/operator/ceph/nfs/config.go
Expand Up @@ -60,20 +60,20 @@ func getGaneshaConfigObject(n *cephv1.CephNFS, version cephver.CephVersion, name
}

func getRadosURL(n *cephv1.CephNFS, version cephver.CephVersion, name string) string {
url := fmt.Sprintf("rados://%s/", n.Spec.RADOS.Pool)
url := fmt.Sprintf("rados://%s/", radosPoolName)

if n.Spec.RADOS.Namespace != "" {
url += n.Spec.RADOS.Namespace + "/"
if n.Name != "" {
url += n.Name + "/"
}

url += getGaneshaConfigObject(n, version, name)
return url
}

func (r *ReconcileCephNFS) generateKeyring(n *cephv1.CephNFS, name string) error {
osdCaps := fmt.Sprintf("allow rw pool=%s", n.Spec.RADOS.Pool)
if n.Spec.RADOS.Namespace != "" {
osdCaps = fmt.Sprintf("%s namespace=%s", osdCaps, n.Spec.RADOS.Namespace)
osdCaps := fmt.Sprintf("allow rw pool=%s", radosPoolName)
if n.Name != "" {
osdCaps = fmt.Sprintf("%s namespace=%s", osdCaps, n.Name)
}

caps := []string{"mon", "allow r", "osd", osdCaps}
Expand Down Expand Up @@ -120,8 +120,8 @@ RADOS_KV {
ceph_conf = '` + cephclient.DefaultConfigFilePath() + `';
userid = ` + userID + `;
nodeid = ` + nodeID + `;
pool = "` + n.Spec.RADOS.Pool + `";
namespace = "` + n.Spec.RADOS.Namespace + `";
pool = "` + radosPoolName + `";
namespace = "` + n.Name + `";
}
RADOS_URLS {
Expand Down
4 changes: 0 additions & 4 deletions pkg/operator/ceph/nfs/controller_test.go
Expand Up @@ -90,10 +90,6 @@ func TestCephNFSController(t *testing.T) {
Namespace: namespace,
},
Spec: cephv1.NFSGaneshaSpec{
RADOS: cephv1.GaneshaRADOSSpec{
Pool: "foo",
Namespace: namespace,
},
Server: cephv1.GaneshaServerSpec{
Active: 1,
},
Expand Down
37 changes: 26 additions & 11 deletions pkg/operator/ceph/nfs/nfs.go
Expand Up @@ -37,6 +37,7 @@ import (

const (
	// ganeshaRadosGraceCmd is the CLI tool used to manage Ganesha's grace
	// database stored in RADOS.
	ganeshaRadosGraceCmd = "ganesha-rados-grace"
	// radosPoolName is the built-in RADOS pool used for all NFS recovery
	// data and per-daemon configs; the CephNFS object's name is used as the
	// RADOS namespace within it.
	radosPoolName = ".nfs"
)

var updateDeploymentAndWait = opmon.UpdateCephDeploymentAndWait
Expand Down Expand Up @@ -130,8 +131,8 @@ func (r *ReconcileCephNFS) addRADOSConfigFile(n *cephv1.CephNFS, name string) er
config := getGaneshaConfigObject(n, r.clusterInfo.CephVersion, name)
cmd := "rados"
args := []string{
"--pool", n.Spec.RADOS.Pool,
"--namespace", n.Spec.RADOS.Namespace,
"--pool", radosPoolName,
"--namespace", n.Name,
"--conf", cephclient.CephConfFilePath(r.context.ConfigDir, n.Namespace),
}
err := r.context.Executor.ExecuteCommand(cmd, append(args, "stat", config)...)
Expand Down Expand Up @@ -165,7 +166,7 @@ func (r *ReconcileCephNFS) removeServerFromDatabase(nfs *cephv1.CephNFS, name st
func (r *ReconcileCephNFS) runGaneshaRadosGrace(nfs *cephv1.CephNFS, name, action string) error {
nodeID := getNFSNodeID(nfs, name)
cmd := ganeshaRadosGraceCmd
args := []string{"--pool", nfs.Spec.RADOS.Pool, "--ns", nfs.Spec.RADOS.Namespace, action, nodeID}
args := []string{"--pool", radosPoolName, "--ns", nfs.Name, action, nodeID}
env := []string{fmt.Sprintf("CEPH_CONF=%s", cephclient.CephConfFilePath(r.context.ConfigDir, nfs.Namespace))}

return r.context.Executor.ExecuteCommandWithEnv(env, cmd, args...)
Expand Down Expand Up @@ -254,6 +255,21 @@ func instanceName(n *cephv1.CephNFS, name string) string {
return fmt.Sprintf("%s-%s-%s", AppName, n.Name, name)
}

// createNFSRADOSPool creates the built-in NFS RADOS pool (".nfs") and enables
// the "nfs" application on it so Ceph accepts it for NFS recovery data.
// Returns a wrapped error if either ceph command fails.
func createNFSRADOSPool(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo) error {
	// Use the shared constant rather than a hard-coded ".nfs" so the pool
	// name stays consistent with the rest of this package.
	args := []string{"osd", "pool", "create", radosPoolName}
	if _, err := cephclient.NewCephCommand(context, clusterInfo, args).Run(); err != nil {
		return errors.Wrapf(err, "failed to create RADOS pool %q", radosPoolName)
	}

	args = []string{"osd", "pool", "application", "enable", radosPoolName, "nfs"}
	if _, err := cephclient.NewCephCommand(context, clusterInfo, args).Run(); err != nil {
		return errors.Wrapf(err, "failed to enable nfs application on RADOS pool %q", radosPoolName)
	}

	return nil
}

func validateGanesha(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, n *cephv1.CephNFS) error {
// core properties
if n.Name == "" {
Expand All @@ -263,20 +279,19 @@ func validateGanesha(context *clusterd.Context, clusterInfo *cephclient.ClusterI
return errors.New("missing namespace")
}

// Client recovery properties
if n.Spec.RADOS.Pool == "" {
return errors.New("missing RADOS.pool")
}

// Ganesha server properties
if n.Spec.Server.Active == 0 {
return errors.New("at least one active server required")
}

// The existence of the pool provided in n.Spec.RADOS.Pool is necessary otherwise addRADOSConfigFile() will fail
_, err := cephclient.GetPoolDetails(context, clusterInfo, n.Spec.RADOS.Pool)
// The existence of the .nfs pool is necessary otherwise addRADOSConfigFile() will fail
_, err := cephclient.GetPoolDetails(context, clusterInfo, radosPoolName)
if err != nil {
return errors.Wrapf(err, "pool %q not found", n.Spec.RADOS.Pool)
// Create the .nfs pool if it doesn't already exist
err := createNFSRADOSPool(context, clusterInfo)
if err != nil {
return errors.Wrapf(err, ".nfs pool not found and unable to create it ")
}
}

return nil
Expand Down
8 changes: 0 additions & 8 deletions pkg/operator/ceph/nfs/spec_test.go
Expand Up @@ -43,10 +43,6 @@ func TestDeploymentSpec(t *testing.T) {
Namespace: "rook-ceph-test-ns",
},
Spec: cephv1.NFSGaneshaSpec{
RADOS: cephv1.GaneshaRADOSSpec{
Pool: "myfs-data0",
Namespace: "nfs-test-ns",
},
Server: cephv1.GaneshaServerSpec{
Active: 3,
Resources: v1.ResourceRequirements{
Expand Down Expand Up @@ -78,10 +74,6 @@ func TestDeploymentSpec(t *testing.T) {
Namespace: namespace,
},
Spec: cephv1.NFSGaneshaSpec{
RADOS: cephv1.GaneshaRADOSSpec{
Pool: "foo",
Namespace: namespace,
},
Server: cephv1.GaneshaServerSpec{
Active: 1,
},
Expand Down
4 changes: 2 additions & 2 deletions tests/framework/clients/nfs.go
Expand Up @@ -39,10 +39,10 @@ func CreateNFSOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests
}

// Create creates a filesystem in Rook
func (n *NFSOperation) Create(namespace, name, pool string, daemonCount int) error {
func (n *NFSOperation) Create(namespace, name string, daemonCount int) error {

logger.Infof("creating the NFS daemons via CRD")
if err := n.k8sh.ResourceOperation("apply", n.manifests.GetNFS(name, pool, daemonCount)); err != nil {
if err := n.k8sh.ResourceOperation("apply", n.manifests.GetNFS(name, daemonCount)); err != nil {
return err
}

Expand Down
7 changes: 2 additions & 5 deletions tests/framework/installer/ceph_manifests.go
Expand Up @@ -40,7 +40,7 @@ type CephManifests interface {
GetBlockSnapshotClass(snapshotClassName, reclaimPolicy string) string
GetFileStorageSnapshotClass(snapshotClassName, reclaimPolicy string) string
GetFilesystem(name string, activeCount int) string
GetNFS(name, pool string, daemonCount int) string
GetNFS(name string, daemonCount int) string
GetRBDMirror(name string, daemonCount int) string
GetObjectStore(name string, replicaCount, port int, tlsEnable bool) string
GetObjectStoreUser(name, displayName, store string) string
Expand Down Expand Up @@ -371,16 +371,13 @@ spec:
}

// GetFilesystem returns the manifest to create a Rook Ceph NFS resource with the given config.
func (m *CephManifestsMaster) GetNFS(name, pool string, count int) string {
func (m *CephManifestsMaster) GetNFS(name string, count int) string {
return `apiVersion: ceph.rook.io/v1
kind: CephNFS
metadata:
name: ` + name + `
namespace: ` + m.settings.Namespace + `
spec:
rados:
pool: ` + pool + `
namespace: nfs-ns
server:
active: ` + strconv.Itoa(count)
}
Expand Down
5 changes: 1 addition & 4 deletions tests/framework/installer/ceph_manifests_v1.6.go
Expand Up @@ -322,16 +322,13 @@ spec:
}

// GetFilesystem returns the manifest to create a Rook Ceph NFS resource with the given config.
func (m *CephManifestsV1_6) GetNFS(name, pool string, count int) string {
func (m *CephManifestsV1_6) GetNFS(name string, count int) string {
return `apiVersion: ceph.rook.io/v1
kind: CephNFS
metadata:
name: ` + name + `
namespace: ` + m.settings.Namespace + `
spec:
rados:
pool: ` + pool + `
namespace: nfs-ns
server:
active: ` + strconv.Itoa(count)
}
Expand Down
2 changes: 1 addition & 1 deletion tests/integration/ceph_base_file_test.go
Expand Up @@ -305,7 +305,7 @@ func runFileE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.S

func testNFSDaemons(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings, filesystemName string) {
name := "my-nfs"
err := helper.NFSClient.Create(settings.Namespace, name, filesystemName+"-data0", 2)
err := helper.NFSClient.Create(settings.Namespace, name, 2)
require.Nil(s.T(), err)

err = helper.NFSClient.Delete(settings.Namespace, name)
Expand Down

0 comments on commit 4335b6e

Please sign in to comment.