Merge pull request #9104 from BlaineEXE/nfs-restart-with-configmap
nfs: restart nfs servers when configmap is updated
leseb committed Nov 26, 2021
2 parents 2519018 + 5a7dee2 commit bd962e3
Showing 4 changed files with 241 additions and 13 deletions.
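For context before the diffs: this fix uses the common Kubernetes pattern of hashing a ConfigMap's data into a pod-template annotation, so any config change alters the Deployment's pod template and triggers a rollout. A minimal sketch of the pattern, with illustrative helper names (not the exact Rook code):

package example

import (
	"crypto/sha256"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
)

// hashConfigMapData returns a hex digest of a ConfigMap's Data. fmt prints
// map keys in sorted order (Go 1.12+), so equal data always hashes equally.
func hashConfigMapData(cm *v1.ConfigMap) string {
	return fmt.Sprintf("%x", sha256.Sum256([]byte(fmt.Sprintf("%v", cm.Data))))
}

// stampConfigHash writes the digest into the pod template. Any change to the
// digest changes the template, so the Deployment controller rolls the pods.
func stampConfigHash(d *appsv1.Deployment, cm *v1.ConfigMap) {
	if d.Spec.Template.Annotations == nil {
		d.Spec.Template.Annotations = map[string]string{}
	}
	d.Spec.Template.Annotations["config-hash"] = hashConfigMapData(cm)
}

In this PR, the digest is computed in createConfigMap (nfs.go below) and stamped onto the pod template in makeDeployment (spec.go below).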
32 changes: 21 additions & 11 deletions pkg/operator/ceph/nfs/nfs.go
@@ -18,6 +18,7 @@ limitations under the License.
package nfs

import (
"crypto/sha256"
"fmt"

"github.com/banzaicloud/k8s-objectmatcher/patch"
@@ -45,17 +46,18 @@ const (
var updateDeploymentAndWait = opmon.UpdateCephDeploymentAndWait

type daemonConfig struct {
ID string // letter ID of daemon (e.g., a, b, c, ...)
ConfigConfigMap string // name of configmap holding config
DataPathMap *config.DataPathMap // location to store data in container
ID string // letter ID of daemon (e.g., a, b, c, ...)
ConfigConfigMap string // name of configmap holding config
ConfigConfigMapHash string // hash of configmap holding config
DataPathMap *config.DataPathMap // location to store data in container
}

// Create the ganesha server
func (r *ReconcileCephNFS) upCephNFS(n *cephv1.CephNFS) error {
for i := 0; i < n.Spec.Server.Active; i++ {
id := k8sutil.IndexToName(i)

configName, err := r.createConfigMap(n, id)
configName, configHash, err := r.createConfigMap(n, id)
if err != nil {
return errors.Wrap(err, "failed to create config")
}
@@ -66,8 +68,9 @@ func (r *ReconcileCephNFS) upCephNFS(n *cephv1.CephNFS) error {
}

cfg := daemonConfig{
ID: id,
ConfigConfigMap: configName,
ID: id,
ConfigConfigMap: configName,
ConfigConfigMapHash: configHash,
DataPathMap: &config.DataPathMap{
HostDataDir: "", // nfs daemon does not store data on host, ...
ContainerDataDir: cephclient.DefaultConfigDir, // does share data in containers using emptyDir, ...
@@ -190,28 +193,35 @@ func (r *ReconcileCephNFS) generateConfigMap(n *cephv1.CephNFS, name string) *v1
return configMap
}

func (r *ReconcileCephNFS) createConfigMap(n *cephv1.CephNFS, name string) (string, error) {
// return the name of the configmap, plus a hash of the data
func (r *ReconcileCephNFS) createConfigMap(n *cephv1.CephNFS, name string) (string, string, error) {
// Generate configMap
configMap := r.generateConfigMap(n, name)

// Set owner reference
err := controllerutil.SetControllerReference(n, configMap, r.scheme)
if err != nil {
return "", errors.Wrapf(err, "failed to set owner reference for ceph ganesha configmap %q", configMap.Name)
return "", "", errors.Wrapf(err, "failed to set owner reference for ceph ganesha configmap %q", configMap.Name)
}

if _, err := r.context.Clientset.CoreV1().ConfigMaps(n.Namespace).Create(r.opManagerContext, configMap, metav1.CreateOptions{}); err != nil {
if !kerrors.IsAlreadyExists(err) {
return "", errors.Wrap(err, "failed to create ganesha config map")
return "", "", errors.Wrap(err, "failed to create ganesha config map")
}

logger.Debugf("updating config map %q that already exists", configMap.Name)
if _, err = r.context.Clientset.CoreV1().ConfigMaps(n.Namespace).Update(r.opManagerContext, configMap, metav1.UpdateOptions{}); err != nil {
return "", errors.Wrap(err, "failed to update ganesha config map")
return "", "", errors.Wrap(err, "failed to update ganesha config map")
}
}

return configMap.Name, nil
h := sha256.New()
if _, err := h.Write([]byte(fmt.Sprintf("%v", configMap.Data))); err != nil {
return "", "", errors.Wrapf(err, "failed to get hash of ganesha config map")
}
configHash := fmt.Sprintf("%x", h.Sum(nil))

return configMap.Name, configHash, nil
}

// Down scale the ganesha server
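A note on the hashing in createConfigMap above: it relies on fmt's map formatting being stable. Since Go 1.12, fmt prints map keys in sorted order, so the same Data map always produces the same digest regardless of insertion order. A quick standalone check (the keys here are illustrative, not the real ganesha config keys):

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	a := map[string]string{"ganesha.conf": "EXPORT { ... }", "rados-url": "x"}
	b := map[string]string{"rados-url": "x", "ganesha.conf": "EXPORT { ... }"}
	// fmt sorts map keys, so insertion order does not change the output ...
	fmt.Println(fmt.Sprintf("%v", a) == fmt.Sprintf("%v", b)) // true
	// ... and therefore does not change the digest.
	fmt.Printf("%x\n", sha256.Sum256([]byte(fmt.Sprintf("%v", a))))
}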
209 changes: 209 additions & 0 deletions pkg/operator/ceph/nfs/nfs_test.go
@@ -0,0 +1,209 @@
/*
Copyright 2021 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package nfs manages NFS ganesha servers for Ceph
package nfs

import (
"context"
"testing"

"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/client/clientset/versioned/scheme"
"github.com/rook/rook/pkg/clusterd"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
cephver "github.com/rook/rook/pkg/operator/ceph/version"
exectest "github.com/rook/rook/pkg/util/exec/test"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sfake "k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func TestReconcileCephNFS_createConfigMap(t *testing.T) {
s := scheme.Scheme

clientset := k8sfake.NewSimpleClientset()

c := &clusterd.Context{
Executor: &exectest.MockExecutor{},
Clientset: clientset,
}

r := &ReconcileCephNFS{
scheme: s,
context: c,
clusterInfo: &cephclient.ClusterInfo{
FSID: "myfsid",
CephVersion: cephver.Octopus,
},
cephClusterSpec: &cephv1.ClusterSpec{
CephVersion: cephv1.CephVersionSpec{
Image: "quay.io/ceph/ceph:v15",
},
},
}

nfs := &cephv1.CephNFS{
ObjectMeta: metav1.ObjectMeta{
Name: "my-nfs",
Namespace: "rook-ceph-test-ns",
},
Spec: cephv1.NFSGaneshaSpec{
RADOS: cephv1.GaneshaRADOSSpec{
Pool: "myfs-data0",
Namespace: "nfs-test-ns",
},
Server: cephv1.GaneshaServerSpec{
Active: 3,
},
},
}

t.Run("running multiple times should give the same hash", func(t *testing.T) {
cmName, hash1, err := r.createConfigMap(nfs, "a")
assert.NoError(t, err)
assert.Equal(t, "rook-ceph-nfs-my-nfs-a", cmName)
_, err = r.context.Clientset.CoreV1().ConfigMaps("rook-ceph-test-ns").Get(context.TODO(), cmName, metav1.GetOptions{})
assert.NoError(t, err)

_, hash2, err := r.createConfigMap(nfs, "a")
assert.NoError(t, err)
_, err = r.context.Clientset.CoreV1().ConfigMaps("rook-ceph-test-ns").Get(context.TODO(), cmName, metav1.GetOptions{})
assert.NoError(t, err)

assert.Equal(t, hash1, hash2)
})

t.Run("running with different IDs should give different hashes", func(t *testing.T) {
cmName, hash1, err := r.createConfigMap(nfs, "a")
assert.NoError(t, err)
assert.Equal(t, "rook-ceph-nfs-my-nfs-a", cmName)
_, err = r.context.Clientset.CoreV1().ConfigMaps("rook-ceph-test-ns").Get(context.TODO(), cmName, metav1.GetOptions{})
assert.NoError(t, err)

_, hash2, err := r.createConfigMap(nfs, "b")
assert.NoError(t, err)
_, err = r.context.Clientset.CoreV1().ConfigMaps("rook-ceph-test-ns").Get(context.TODO(), cmName, metav1.GetOptions{})
assert.NoError(t, err)

assert.NotEqual(t, hash1, hash2)
})

t.Run("running with different configs should give different hashes", func(t *testing.T) {
cmName, hash1, err := r.createConfigMap(nfs, "a")
assert.NoError(t, err)
assert.Equal(t, "rook-ceph-nfs-my-nfs-a", cmName)
_, err = r.context.Clientset.CoreV1().ConfigMaps("rook-ceph-test-ns").Get(context.TODO(), cmName, metav1.GetOptions{})
assert.NoError(t, err)

nfs2 := nfs.DeepCopy()
nfs2.Name = "nfs-two"
_, hash2, err := r.createConfigMap(nfs2, "a")
assert.NoError(t, err)
_, err = r.context.Clientset.CoreV1().ConfigMaps("rook-ceph-test-ns").Get(context.TODO(), cmName, metav1.GetOptions{})
assert.NoError(t, err)

assert.NotEqual(t, hash1, hash2)
})
}

func TestReconcileCephNFS_upCephNFS(t *testing.T) {
ns := "up-ceph-ns-namespace"

s := scheme.Scheme

clientset := k8sfake.NewSimpleClientset()

executor := &exectest.MockExecutor{
MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
logger.Infof("executing command: %s %+v", command, args)
if args[0] == "auth" {
if args[1] == "get-or-create-key" {
return "{\"key\":\"mysecurekey\"}", nil
}
}
panic(errors.Errorf("unhandled command %s %v", command, args))
},
}

client := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects().Build()

c := &clusterd.Context{
Executor: executor,
Client: client,
Clientset: clientset,
}

r := &ReconcileCephNFS{
scheme: s,
context: c,
clusterInfo: &cephclient.ClusterInfo{
FSID: "myfsid",
CephVersion: cephver.Octopus,
Context: context.TODO(),
Namespace: ns,
},
cephClusterSpec: &cephv1.ClusterSpec{
CephVersion: cephv1.CephVersionSpec{
Image: "quay.io/ceph/ceph:v15",
},
},
}

nfs := &cephv1.CephNFS{
ObjectMeta: metav1.ObjectMeta{
Name: "my-nfs",
Namespace: ns,
},
Spec: cephv1.NFSGaneshaSpec{
RADOS: cephv1.GaneshaRADOSSpec{
Pool: "myfs-data0",
Namespace: "nfs-test-ns",
},
Server: cephv1.GaneshaServerSpec{
Active: 2,
},
},
}

err := r.upCephNFS(nfs)
assert.NoError(t, err)

deps, err := r.context.Clientset.AppsV1().Deployments(ns).List(context.TODO(), metav1.ListOptions{})
assert.NoError(t, err)
assert.Len(t, deps.Items, 2)
names := []string{}
hashes := []string{}
for _, dep := range deps.Items {
names = append(names, dep.Name)
assert.Contains(t, dep.Spec.Template.Annotations, "config-hash")
hashes = append(hashes, dep.Spec.Template.Annotations["config-hash"])
}
assert.ElementsMatch(t, []string{"rook-ceph-nfs-my-nfs-a", "rook-ceph-nfs-my-nfs-b"}, names)
assert.NotEqual(t, hashes[0], hashes[1])

svcs, err := r.context.Clientset.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
assert.NoError(t, err)
// Each NFS server gets a service.
assert.Len(t, svcs.Items, 2)
names = []string{}
for _, svc := range svcs.Items {
names = append(names, svc.Name)
}
assert.ElementsMatch(t, []string{"rook-ceph-nfs-my-nfs-a", "rook-ceph-nfs-my-nfs-b"}, names)
}
6 changes: 6 additions & 0 deletions pkg/operator/ceph/nfs/spec.go
@@ -141,6 +141,12 @@ func (r *ReconcileCephNFS) makeDeployment(nfs *cephv1.CephNFS, cfg daemonConfig)
ObjectMeta: metav1.ObjectMeta{
Name: resourceName,
Labels: getLabels(nfs, cfg.ID, true),
Annotations: map[string]string{
// set an annotation with the hash of the configmap data so that the pod will be
// re-deployed if the config in the configmap changes. otherwise, the pod won't
// restart when the config is updated.
"config-hash": cfg.ConfigConfigMapHash,
},
},
Spec: podSpec,
}
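Why the annotation in spec.go restarts the servers: the Deployment controller treats any change to spec.template, including annotations, as a new revision, so it creates a new ReplicaSet and rolls the pods. This is the same mechanism `kubectl rollout restart` relies on. A hedged client-go sketch of forcing a rollout this way (illustrative, not code from this PR; the annotation key is made up):

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bumpTemplateAnnotation edits a pod-template annotation; the Deployment
// controller sees a template change and rolls the pods.
func bumpTemplateAnnotation(cs kubernetes.Interface, ns, name string) error {
	ctx := context.TODO()
	d, err := cs.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if d.Spec.Template.Annotations == nil {
		d.Spec.Template.Annotations = map[string]string{}
	}
	d.Spec.Template.Annotations["example/restartedAt"] = time.Now().Format(time.RFC3339)
	_, err = cs.AppsV1().Deployments(ns).Update(ctx, d, metav1.UpdateOptions{})
	return err
}

In the PR itself the annotation value only changes when the ConfigMap data changes, so the pods restart exactly when the ganesha config is updated.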
7 changes: 5 additions & 2 deletions pkg/operator/ceph/nfs/spec_test.go
@@ -109,8 +109,9 @@ func TestDeploymentSpec(t *testing.T) {
id := "i"
configName := "rook-ceph-nfs-my-nfs-i"
cfg := daemonConfig{
ID: id,
ConfigConfigMap: configName,
ID: id,
ConfigConfigMap: configName,
ConfigConfigMapHash: "dcb0d2f5f5e86ec4929d8243cd640b8154165f8ff9b89809964fc7993e9b0101",
DataPathMap: &config.DataPathMap{
HostDataDir: "", // nfs daemon does not store data on host, ...
ContainerDataDir: cephclient.DefaultConfigDir, // does share data in containers using emptyDir, ...
@@ -120,6 +121,8 @@

d, err := r.makeDeployment(nfs, cfg)
assert.NoError(t, err)
assert.NotEmpty(t, d.Spec.Template.Annotations)
assert.Equal(t, "dcb0d2f5f5e86ec4929d8243cd640b8154165f8ff9b89809964fc7993e9b0101", d.Spec.Template.Annotations["config-hash"])

// Deployment should have Ceph labels
optest.AssertLabelsContainRookRequirements(t, d.ObjectMeta.Labels, AppName)
