/*
Copyright 2016 The Rook Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package mds provides methods for managing a Ceph mds cluster.
package mds
import (
"context"
"fmt"
"strings"
"syscall"
"time"

"github.com/banzaicloud/k8s-objectmatcher/patch"
"github.com/coreos/pkg/capnslog"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
"github.com/rook/rook/pkg/operator/ceph/cluster/mon"
"github.com/rook/rook/pkg/operator/ceph/config"
"github.com/rook/rook/pkg/operator/ceph/controller"
cephver "github.com/rook/rook/pkg/operator/ceph/version"
"github.com/rook/rook/pkg/operator/k8sutil"
"github.com/rook/rook/pkg/util/exec"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var logger = capnslog.NewPackageLogger("github.com/rook/rook", "op-mds")
const (
// AppName is the name of Rook's Ceph mds (File) sub-app
AppName = "rook-ceph-mds"
// timeout for waiting for the mds daemons to reach the desired number of active ranks during upgrade
fsWaitForActiveTimeout = 3 * time.Minute
// minimum amount of memory in MB to run the pod
cephMdsPodMinimumMemory uint64 = 4096
)
// Cluster represents a Ceph mds cluster.
type Cluster struct {
clusterInfo *cephclient.ClusterInfo
context *clusterd.Context
clusterSpec *cephv1.ClusterSpec
fs cephv1.CephFilesystem
ownerInfo *k8sutil.OwnerInfo
dataDirHostPath string
}
type mdsConfig struct {
ResourceName string
DaemonID string
DataPathMap *config.DataPathMap // location to store data in container
}
// NewCluster creates a Ceph mds cluster representation.
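//
// A minimal usage sketch (illustrative only; the surrounding variables are
// assumptions, not part of this package):
//
//	c := mds.NewCluster(clusterInfo, context, clusterSpec, fs, ownerInfo, dataDirHostPath)
//	if err := c.Start(); err != nil {
//		// handle failure to create/update the mds deployments
//	}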
func NewCluster(
clusterInfo *cephclient.ClusterInfo,
context *clusterd.Context,
clusterSpec *cephv1.ClusterSpec,
fs cephv1.CephFilesystem,
ownerInfo *k8sutil.OwnerInfo,
dataDirHostPath string,
) *Cluster {
return &Cluster{
clusterInfo: clusterInfo,
context: context,
clusterSpec: clusterSpec,
fs: fs,
ownerInfo: ownerInfo,
dataDirHostPath: dataDirHostPath,
}
}
// UpdateDeploymentAndWait can be overridden for unit tests. Do not alter this for runtime operation.
var UpdateDeploymentAndWait = mon.UpdateCephDeploymentAndWait
// Start starts or updates a Ceph mds cluster in Kubernetes.
func (c *Cluster) Start() error {
// Validate pod's memory if specified
err := controller.CheckPodMemory(cephv1.ResourcesKeyMDS, c.fs.Spec.MetadataServer.Resources, cephMdsPodMinimumMemory)
if err != nil {
return errors.Wrap(err, "error checking pod memory")
}
// If an attempt was made to prepare the daemons for upgrade, make sure an attempt is also
// made to bring the fs state back to the desired state when this method returns, whether it
// returns an error or succeeds.
var fsPreparedForUpgrade = false
// upgrading an MDS cluster requires setting max_mds to 1 and stopping all standby MDS daemons first
isUpgrade, err := c.isCephUpgrade()
if err != nil {
return errors.Wrapf(err, "failed to determine if MDS cluster for filesystem %q needs to be upgraded", c.fs.Name)
}
if isUpgrade {
fsPreparedForUpgrade = true
if err := c.upgradeMDS(); err != nil {
return errors.Wrapf(err, "failed to upgrade MDS cluster for filesystem %q", c.fs.Name)
}
logger.Infof("successfully upgraded MDS cluster for filesystem %q", c.fs.Name)
}
defer func() {
if fsPreparedForUpgrade {
if err := finishedWithDaemonUpgrade(c.context, c.clusterInfo, c.fs); err != nil {
logger.Errorf("for filesystem %q, USER should make sure the Ceph fs max_mds property is set to %d. %v",
c.fs.Name, c.fs.Spec.MetadataServer.ActiveCount, err)
}
}
}()
// Always create double the number of metadata servers to have standby mdses available
replicas := c.fs.Spec.MetadataServer.ActiveCount * 2
// keep list of deployments we want so unwanted ones can be deleted later
desiredDeployments := map[string]bool{} // improvised set
// Create/update deployments
for i := 0; i < int(replicas); i++ {
deployment, err := c.startDeployment(c.clusterInfo.Context, k8sutil.IndexToName(i))
if err != nil {
return errors.Wrapf(err, "failed to start deployment for MDS %q for filesystem %q", k8sutil.IndexToName(i), c.fs.Name)
}
desiredDeployments[deployment] = true
}
if err := c.scaleDownDeployments(replicas, c.fs.Spec.MetadataServer.ActiveCount, desiredDeployments, true); err != nil {
return errors.Wrap(err, "failed to scale down mds deployments")
}
return nil
}
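// startDeployment creates (or updates, if it already exists) the deployment for a single mds
// daemon identified by its letter ID, and returns the deployment's name.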
func (c *Cluster) startDeployment(ctx context.Context, daemonLetterID string) (string, error) {
// Each mds is identified by <fsname>-<letterID>
daemonName := fmt.Sprintf("%s-%s", c.fs.Name, daemonLetterID)
// resource name is rook-ceph-mds-<fs_name>-<daemon_name>
resourceName := fmt.Sprintf("%s-%s-%s", AppName, c.fs.Name, daemonLetterID)
mdsConfig := &mdsConfig{
ResourceName: resourceName,
DaemonID: daemonName,
DataPathMap: config.NewStatelessDaemonDataPathMap(config.MdsType, daemonName, c.fs.Namespace, c.dataDirHostPath),
}
// create a unique keyring for this mds, saved in a k8s secret
_, err := c.generateKeyring(mdsConfig)
if err != nil {
return "", errors.Wrapf(err, "failed to generate keyring for %q", resourceName)
}
// Set the mds config flags.
// Previously we only set the config flags when the deployment was not yet present, which meant
// the flags were only set for newly created CephFilesystem CRs. On upgrade the flags were never
// set, which was not ideal for old clusters that had never received them. The mon config store
// supports setting these flags even while the MDS is running, so set them unconditionally.
logger.Info("setting mds config flags")
err = c.setDefaultFlagsMonConfigStore(mdsConfig.DaemonID)
if err != nil {
// Getting EPERM typically happens when the flag may not be modified at runtime
// This is fine to ignore
code, ok := exec.ExitStatus(err)
if ok && code != int(syscall.EPERM) {
return "", errors.Wrap(err, "failed to set default rgw config options")
}
}
// start the deployment
d, err := c.makeDeployment(mdsConfig, c.fs.Namespace)
if err != nil {
return "", errors.Wrap(err, "failed to create deployment")
}
// Set owner ref to cephFilesystem object
err = c.ownerInfo.SetControllerReference(d)
if err != nil {
return "", errors.Wrapf(err, "failed to set owner reference for ceph filesystem %q secret", d.Name)
}
// Set the deployment hash as an annotation
err = patch.DefaultAnnotator.SetLastAppliedAnnotation(d)
if err != nil {
return "", errors.Wrapf(err, "failed to set annotation for deployment %q", d.Name)
}
_, createErr := c.context.Clientset.AppsV1().Deployments(c.fs.Namespace).Create(ctx, d, metav1.CreateOptions{})
if createErr != nil {
if !kerrors.IsAlreadyExists(createErr) {
return "", errors.Wrapf(createErr, "failed to create mds deployment %s", mdsConfig.ResourceName)
}
logger.Infof("deployment for mds %q already exists. updating if needed", mdsConfig.ResourceName)
if _, err := c.context.Clientset.AppsV1().Deployments(c.fs.Namespace).Get(ctx, d.Name, metav1.GetOptions{}); err != nil {
return "", errors.Wrapf(err, "failed to get existing mds deployment %q for update", d.Name)
}
if err := UpdateDeploymentAndWait(c.context, c.clusterInfo, d, config.MdsType, daemonLetterID, c.clusterSpec.SkipUpgradeChecks, c.clusterSpec.ContinueUpgradeAfterChecksEvenIfNotHealthy); err != nil {
return "", errors.Wrapf(err, "failed to update mds deployment %q", d.Name)
}
}
return d.GetName(), nil
}
// isCephUpgrade determines whether any running mds daemon's version is older than the target
// image version, i.e. whether an upgrade is needed.
func (c *Cluster) isCephUpgrade() (bool, error) {
allVersions, err := cephclient.GetAllCephDaemonVersions(c.context, c.clusterInfo)
if err != nil {
return false, err
}
for key := range allVersions.Mds {
currentVersion, err := cephver.ExtractCephVersion(key)
if err != nil {
return false, err
}
if cephver.IsSuperior(c.clusterInfo.CephVersion, *currentVersion) {
logger.Debugf("ceph version for MDS %q is %q and target version is %q", key, currentVersion.String(), c.clusterInfo.CephVersion.String())
return true, nil
}
}
return false, nil
}
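// upgradeMDS upgrades the mds daemons by shrinking the filesystem to a single active mds,
// stopping all standby daemons, and restarting the remaining active daemon on the new Ceph
// image. The other daemons are then updated by the normal reconcile path in Start.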
func (c *Cluster) upgradeMDS() error {
logger.Infof("upgrading MDS cluster for filesystem %q", c.fs.Name)
// 1. set allow_standby_replay to false
if err := cephclient.AllowStandbyReplay(c.context, c.clusterInfo, c.fs.Name, false); err != nil {
return errors.Wrap(err, "failed to set allow_standby_replay to false")
}
// In Pacific, standby-replay daemons are stopped automatically. Older versions of Ceph require
// us to stop these daemons manually.
if err := cephclient.FailAllStandbyReplayMDS(c.context, c.clusterInfo, c.fs.Name); err != nil {
return errors.Wrap(err, "failed to fail mds agent in up:standby-replay state")
}
// 2. set max_mds to 1
logger.Debug("start setting active mds count to 1")
if err := cephclient.SetNumMDSRanks(c.context, c.clusterInfo, c.fs.Name, 1); err != nil {
return errors.Wrapf(err, "failed setting active mds count to %d", 1)
}
// 3. wait for the number of active ranks to be 1
if err := cephclient.WaitForActiveRanks(c.context, c.clusterInfo, c.fs.Name, 1, false, fsWaitForActiveTimeout); err != nil {
return errors.Wrap(err, "failed waiting for active ranks to be 1")
}
// 4. stop standby daemons
daemonName, err := cephclient.GetMdsIdByRank(c.context, c.clusterInfo, c.fs.Name, 0)
if err != nil {
return errors.Wrap(err, "failed to get mds id from rank 0")
}
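// the daemon name has the form <fsname>-<letterID>; keep only the trailing letter ID so the
// corresponding deployment name can be reconstructed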
daemonNameTokens := strings.Split(daemonName, "-")
daemonLetterID := daemonNameTokens[len(daemonNameTokens)-1]
desiredDeployments := map[string]bool{
fmt.Sprintf("%s-%s-%s", AppName, c.fs.Name, daemonLetterID): true,
}
logger.Debugf("stop mds other than %s", daemonName)
err = c.scaleDownDeployments(1, 1, desiredDeployments, false)
if err != nil {
return errors.Wrap(err, "failed to scale down deployments during upgrade")
}
logger.Debugf("waiting for all standbys gone")
if err := cephclient.WaitForNoStandbys(c.context, c.clusterInfo, 120*time.Second); err != nil {
return errors.Wrap(err, "failed to wait for all standbys to stop")
}
// 5. upgrade the current active deployment and wait for it to come back
_, err = c.startDeployment(c.clusterInfo.Context, daemonLetterID)
if err != nil {
return errors.Wrapf(err, "failed to upgrade mds %q", daemonName)
}
logger.Debugf("successfully started daemon %q", daemonName)
// 6. all other MDS daemons will be updated and restarted by main MDS code path
// 7. max_mds & allow_standby_replay will be reset in deferred function finishedWithDaemonUpgrade
return nil
}
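// scaleDownDeployments removes any mds deployments that are not in desiredDeployments. When
// delete is false, the extraneous deployments are scaled to zero replicas instead of deleted.
// Before touching each deployment, it waits for the filesystem to have activeCount active ranks.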
func (c *Cluster) scaleDownDeployments(replicas int32, activeCount int32, desiredDeployments map[string]bool, delete bool) error {
// Remove extraneous mds deployments if they exist
deps, err := getMdsDeployments(c.clusterInfo.Context, c.context, c.fs.Namespace, c.fs.Name)
if err != nil {
return errors.Wrapf(err,
"cannot verify the removal of extraneous mds deployments for filesystem %s. "+
"USER should make sure that only deployments %+v exist which match the filesystem's label selector",
c.fs.Name, desiredDeployments)
}
if len(deps.Items) <= int(replicas) {
// It's possible to check if there are fewer deployments than desired here, but that's
// checked above, and if that condition exists here, it's likely due to the user's manual actions.
logger.Debugf("the number of mds deployments (%d) is not greater than the number desired (%d). no extraneous deployments to delete",
len(deps.Items), replicas)
return nil
}
errCount := 0
for _, d := range deps.Items {
if _, ok := desiredDeployments[d.GetName()]; !ok {
// if the extraneous mdses are the only ones active, Ceph may experience fs downtime
// if they are deleted too quickly; therefore, wait until the number of active mdses is as desired
if err := cephclient.WaitForActiveRanks(c.context, c.clusterInfo, c.fs.Name, activeCount, true, fsWaitForActiveTimeout); err != nil {
errCount++
logger.Errorf(
"number of active mds ranks is not as desired. it is potentially unsafe to continue with extraneous mds deletion, so stopping. "+
"USER should delete undesired mds daemons once filesystem %s is healthy. "+
"desired mds deployments for this filesystem are %+v. %v",
c.fs.Name, desiredDeployments, err)
break // stop trying to delete daemons, but continue to report any errors below
}
localdeployment := d
if !delete {
// only stop the mds daemon, by scaling the deployment replicas to 0
if err := scaleMdsDeployment(c.clusterInfo.Context, c.context, c.fs.Namespace, &localdeployment, 0); err != nil {
errCount++
logger.Errorf("failed to scale mds deployment %q. %v", localdeployment.GetName(), err)
}
continue
}
if err := deleteMdsDeployment(c.clusterInfo.Context, c.context, c.fs.Namespace, &localdeployment); err != nil {
errCount++
logger.Errorf("failed to delete mds deployment. %v", err)
}
daemonName := strings.TrimPrefix(d.GetName(), fmt.Sprintf("%s-", AppName))
err := c.DeleteMdsCephObjects(daemonName)
if err != nil {
logger.Errorf("%v", err)
}
}
}
if errCount > 0 {
return errors.Errorf("%d error(s) during deletion of extraneous mds deployments, see logs above", errCount)
}
deletedOrStopped := "deleted"
if !delete {
deletedOrStopped = "stopped"
}
logger.Infof("successfully %s unwanted MDS deployments", deletedOrStopped)
return nil
}
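// DeleteMdsCephObjects removes the Ceph-side state for the given mds daemon: its settings in
// the mon configuration database and its CephX key.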
func (c *Cluster) DeleteMdsCephObjects(mdsID string) error {
monStore := config.GetMonStore(c.context, c.clusterInfo)
who := fmt.Sprintf("mds.%s", mdsID)
err := monStore.DeleteDaemon(who)
if err != nil {
return errors.Wrapf(err, "failed to delete mds config for %q in mon configuration database", who)
}
logger.Infof("successfully deleted mds config for %q in mon configuration database", who)
err = cephclient.AuthDelete(c.context, c.clusterInfo, who)
if err != nil {
return err
}
logger.Infof("successfully deleted mds CephX key for %q", who)
return nil
}
// finishedWithDaemonUpgrade performs all actions necessary to bring the filesystem back to its
// ideal state following an upgrade of its daemon(s).
func finishedWithDaemonUpgrade(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, fs cephv1.CephFilesystem) error {
fsName := fs.Name
activeMDSCount := fs.Spec.MetadataServer.ActiveCount
logger.Debugf("restoring filesystem %s from daemon upgrade", fsName)
logger.Debugf("bringing num active MDS daemons for fs %s back to %d", fsName, activeMDSCount)
// TODO: Unknown (Aug 2021) if this can be removed once Rook no longer supports Octopus.
// upgrade guide according to octopus https://docs.ceph.com/en/octopus/cephfs/upgrading/
if err := cephclient.SetNumMDSRanks(context, clusterInfo, fsName, activeMDSCount); err != nil {
return errors.Wrapf(err, "failed to restore filesystem %s following daemon upgrade", fsName)
}
// set allow_standby_replay back
if err := cephclient.AllowStandbyReplay(context, clusterInfo, fsName, fs.Spec.MetadataServer.ActiveStandby); err != nil {
return errors.Wrap(err, "failed to restore allow_standby_replay setting")
}
return nil
}