-
Notifications
You must be signed in to change notification settings - Fork 2.7k
/
health.go
287 lines (242 loc) · 10.2 KB
/
health.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
/*
Copyright 2020 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package object
import (
"context"
"fmt"
"time"
"github.com/ceph/go-ceph/rgw/admin"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/operator/k8sutil"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
	// s3UserHealthCheckName is the ID/display name of the internal RGW user
	// created exclusively for health checking (suffixed with the store UID).
	s3UserHealthCheckName = "rook-ceph-internal-s3-user-checker"
	// s3HealthCheckBucketName is the base name of the bucket used for the
	// PUT/GET health probe (suffixed with the store UID).
	s3HealthCheckBucketName = "rook-ceph-bucket-checker"
	// s3HealthCheckObjectBody is the payload written and read back to verify
	// end-to-end object integrity.
	s3HealthCheckObjectBody = "Test Rook Object Data"
	// s3HealthCheckObjectKey is the object key used for the probe object.
	s3HealthCheckObjectKey = "rookHealthCheckTestObject"
	// contentType is sent as the Content-Type of the probe object.
	// NOTE(review): "plain/text" looks reversed — the standard MIME type is
	// "text/plain". Confirm before changing; RGW accepts it as-is today.
	contentType = "plain/text"
)
var (
	// defaultHealthCheckInterval is used when the CephObjectStore spec does
	// not override the bucket health-check interval.
	defaultHealthCheckInterval = 1 * time.Minute
)
// bucketChecker aggregates the context needed to periodically verify the
// health of a CephObjectStore by exercising its S3 endpoint.
type bucketChecker struct {
	// context is the operator-wide clusterd context.
	context *clusterd.Context
	// objContext carries the RGW admin-ops client and endpoint/TLS details.
	objContext *AdminOpsContext
	// interval is how often the health check runs (spec override or default).
	interval *time.Duration
	// port is the object store's service port, taken from the spec.
	port int32
	// client is the controller-runtime client used to update CR status.
	client client.Client
	// namespacedName identifies the CephObjectStore being checked.
	namespacedName types.NamespacedName
	// objectStoreSpec is the store spec, re-read on each check for endpoint updates.
	objectStoreSpec *cephv1.ObjectStoreSpec
}
// newBucketChecker builds a bucketChecker for the given CephObjectStore,
// resolving the store's port and a multisite admin-ops context. The check
// interval defaults to defaultHealthCheckInterval unless overridden in the
// spec's HealthCheck.Bucket.Interval.
func newBucketChecker(
	ctx *clusterd.Context, objContext *Context, client client.Client, namespacedName types.NamespacedName, objectStoreSpec *cephv1.ObjectStoreSpec,
) (*bucketChecker, error) {
	storePort, err := objectStoreSpec.GetPort()
	if err != nil {
		return nil, errors.Wrapf(err, "failed to create bucket checker for CephObjectStore %q", namespacedName.String())
	}

	adminCtx, err := NewMultisiteAdminOpsContext(objContext, objectStoreSpec)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to create bucket checker for CephObjectStore %q", namespacedName.String())
	}

	// Start from the default interval; the spec may override it below.
	interval := &defaultHealthCheckInterval
	if override := objectStoreSpec.HealthCheck.Bucket.Interval; override != nil {
		logger.Infof("ceph rgw status check interval for object store %q is %q", namespacedName.Name, override.Duration.String())
		interval = &override.Duration
	}

	return &bucketChecker{
		context:         ctx,
		objContext:      adminCtx,
		interval:        interval,
		port:            storePort,
		namespacedName:  namespacedName,
		client:          client,
		objectStoreSpec: objectStoreSpec,
	}, nil
}
// checkObjectStore periodically checks the health of the object store until
// stopCh is closed, updating the CR bucket status after every run. On stop it
// purges the health-check bucket and user (needed for external mode, where
// nothing is garbage-collected with the CR in converged clusters).
func (c *bucketChecker) checkObjectStore(stopCh chan struct{}) {
	// runCheck performs one health check and records a failure condition on
	// the CR if it fails. Shared by the immediate first check and the loop so
	// the error handling is defined in exactly one place.
	runCheck := func() {
		if err := c.checkObjectStoreHealth(); err != nil {
			updateStatusBucket(c.client, c.namespacedName, cephv1.ConditionFailure, err.Error())
			logger.Debugf("failed to check rgw health for object store %q. %v", c.namespacedName.Name, err)
		}
	}

	// check the object store health immediately before starting the loop
	runCheck()

	for {
		select {
		case <-stopCh:
			// purge bucket and s3 user
			// Needed for external mode where in converged everything goes away with the CR deletion
			c.cleanupHealthCheck()
			logger.Infof("stopping monitoring of rgw endpoints for object store %q", c.namespacedName.Name)
			return

		case <-time.After(*c.interval):
			logger.Debugf("checking rgw health of object store %q", c.namespacedName.Name)
			runCheck()
		}
	}
}
// checkObjectStoreHealth performs one end-to-end health probe of the object
// store: it ensures the dedicated checker user exists (re-hydrating its S3
// credentials if it already does), connects with those credentials, purges any
// stale probe object, runs the PUT/GET/compare bucket test, and finally marks
// the CR status as Connected. The checker bucket and user are intentionally
// kept between runs because bucket creation is expensive.
//
// Returns an error describing the first failing step; on error the caller is
// responsible for recording the failure condition on the CR.
func (c *bucketChecker) checkObjectStoreHealth() error {
	/*
		0. purge the s3 object by default
		1. create an S3 user
		2. always use the same user
		3. if already exists just re-hydrate the s3 credentials
		4. create a bucket with that user or use the existing one (always use the same bucket)
		5. create a check file
		6. get the hash of the file
		7. PUT the file
		8. GET the file
		9. compare hashes
		10. delete object on bucket
		11. update CR health status check
		Always keep the bucket and the user for the health check, just do PUT and GET because bucket creation is expensive
	*/

	// Keep admin ops context up-to date if there are config changes
	if err := UpdateEndpoint(&c.objContext.Context, c.objectStoreSpec); err != nil {
		return errors.Wrapf(err, "failed to parse updated CephObjectStore spec")
	}

	// Generate unique user and bucket name
	bucketName := genHealthCheckerBucketName(c.objContext.UID)
	userConfig := genUserCheckerConfig(c.objContext.UID)

	// Create checker user
	logger.Debugf("creating s3 user object %q for object store %q health check", userConfig.ID, c.namespacedName.Name)
	var user admin.User
	user, err := c.objContext.AdminOpsClient.GetUser(context.TODO(), userConfig)
	if err != nil {
		if errors.Is(err, admin.ErrNoSuchUser) {
			user, err = c.objContext.AdminOpsClient.CreateUser(context.TODO(), userConfig)
			if err != nil {
				return errors.Wrapf(err, "failed to create ceph object user %q", userConfig.ID)
			}
		} else {
			return errors.Wrapf(err, "failed to get details from ceph object user %q", userConfig.ID)
		}
	}

	// Guard against a user with no keys: indexing Keys[0] below would panic
	// and crash the operator instead of reporting an unhealthy store.
	if len(user.Keys) == 0 {
		return errors.Errorf("no s3 keys found for ceph object user %q", userConfig.ID)
	}

	// Set access and secret key
	tlsCert := c.objContext.TlsCert
	s3endpoint := c.objContext.Endpoint
	s3AccessKey := user.Keys[0].AccessKey
	s3SecretKey := user.Keys[0].SecretKey

	// Initiate s3 agent
	logger.Debugf("initializing s3 connection for object store %q", c.namespacedName.Name)
	s3client, err := NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "", false, tlsCert)
	if err != nil {
		return errors.Wrap(err, "failed to initialize s3 connection")
	}

	// Force purge the s3 object before starting anything
	cleanupObjectHealthCheck(s3client, c.objContext.UID)

	// Bucket health test
	err = c.testBucketHealth(s3client, bucketName)
	if err != nil {
		return errors.Wrapf(err, "failed to run bucket health checks for object store %q", c.namespacedName.String())
	}
	logger.Debugf("successfully checked object store endpoint for object store %q", c.namespacedName.String())

	// Update the EndpointStatus in the CR to reflect the healthyness
	updateStatusBucket(c.client, c.namespacedName, cephv1.ConditionConnected, "")

	return nil
}
// cleanupObjectHealthCheck deletes the probe object from the health-check
// bucket belonging to the object store with the given UID. Failures are
// logged, not returned, since this is best-effort cleanup.
func cleanupObjectHealthCheck(s3client *S3Agent, objectStoreUID string) {
	bucket := genHealthCheckerBucketName(objectStoreUID)
	logger.Debugf("deleting object %q from bucket %q", s3HealthCheckObjectKey, bucket)
	if _, err := s3client.DeleteObjectInBucket(bucket, s3HealthCheckObjectKey); err != nil {
		logger.Errorf("failed to delete object in bucket. %v", err)
	}
}
// cleanupHealthCheck removes the health-check bucket (purging its objects)
// and the dedicated checker user. All failures are logged rather than
// returned: this runs during teardown and is best-effort. "Not found" is not
// treated as an error for either resource.
func (c *bucketChecker) cleanupHealthCheck() {
	bucketToDelete := genHealthCheckerBucketName(c.objContext.UID)
	logger.Infof("deleting object %q from bucket %q in object store %q", s3HealthCheckObjectKey, bucketToDelete, c.namespacedName.Name)

	thePurge := true
	err := c.objContext.AdminOpsClient.RemoveBucket(context.TODO(), admin.Bucket{Bucket: bucketToDelete, PurgeObject: &thePurge})
	if err != nil {
		if errors.Is(err, admin.ErrNoSuchBucket) {
			// opinion: "not found" is not an error
			logger.Debugf("bucket %q does not exist", bucketToDelete)
		} else {
			logger.Errorf("failed to delete bucket %q for object store %q. %v", bucketToDelete, c.namespacedName.Name, err)
		}
	}

	userToDelete := genUserCheckerConfig(c.objContext.UID)
	err = c.objContext.AdminOpsClient.RemoveUser(context.TODO(), userToDelete)
	if err != nil {
		if !errors.Is(err, admin.ErrNoSuchUser) {
			logger.Errorf("failed to delete object user %q for object store %q. %v", userToDelete.ID, c.namespacedName.Name, err)
		}
		return
	}
	// Only log success when RemoveUser actually succeeded; previously this
	// was logged unconditionally, even right after a deletion failure.
	logger.Debugf("successfully deleted object user %q for object store %q", userToDelete.ID, c.namespacedName.Name)
}
// toCustomResourceStatus builds a fresh BucketStatus for the CR from the
// latest check result. LastChecked is always the current UTC time; LastChanged
// is carried over from currentStatus unless the Details text differs, in which
// case it is bumped to now. A nil currentStatus leaves LastChanged empty.
func toCustomResourceStatus(currentStatus *cephv1.BucketStatus, details string, health cephv1.ConditionType) *cephv1.BucketStatus {
	now := time.Now().UTC().Format(time.RFC3339)
	updated := &cephv1.BucketStatus{
		Health:      health,
		LastChecked: now,
		Details:     details,
	}
	if currentStatus == nil {
		return updated
	}
	if currentStatus.Details == details {
		updated.LastChanged = currentStatus.LastChanged
	} else {
		updated.LastChanged = now
	}
	return updated
}
func genHealthCheckerBucketName(uuid string) string {
return fmt.Sprintf("%s-%s", s3HealthCheckBucketName, uuid)
}
// genUserCheckerConfig returns the admin.User config for the dedicated
// health-check user of the object store with the given UID; the ID and
// display name are identical.
func genUserCheckerConfig(cephObjectStoreUID string) admin.User {
	name := s3UserHealthCheckName + "-" + cephObjectStoreUID
	return admin.User{ID: name, DisplayName: name}
}
// testBucketHealth runs the PUT/GET/compare probe against the given bucket:
// it ensures the bucket exists, writes the probe object, reads it back, and
// compares content hashes. The probe object is purged on exit regardless of
// outcome. Returns an error describing the first failing step.
func (c *bucketChecker) testBucketHealth(s3client *S3Agent, bucket string) error {
	// Purge on exit
	defer cleanupObjectHealthCheck(s3client, c.objContext.UID)

	// Create S3 bucket
	logger.Debugf("creating bucket %q", bucket)
	err := s3client.CreateBucketNoInfoLogging(bucket)
	if err != nil {
		return errors.Wrapf(err, "failed to create bucket %q for object store %q", bucket, c.namespacedName.Name)
	}

	// Put an object into the bucket (s3HealthCheckObjectBody is already a
	// string; no conversion needed)
	logger.Debugf("putting object %q in bucket %q for object store %q", s3HealthCheckObjectKey, bucket, c.namespacedName.Name)
	_, err = s3client.PutObjectInBucket(bucket, s3HealthCheckObjectBody, s3HealthCheckObjectKey, contentType)
	if err != nil {
		return errors.Wrapf(err, "failed to put object %q in bucket %q for object store %q", s3HealthCheckObjectKey, bucket, c.namespacedName.Name)
	}

	// Get the object from the bucket
	logger.Debugf("getting object %q in bucket %q for object store %q", s3HealthCheckObjectKey, bucket, c.namespacedName.Name)
	read, err := s3client.GetObjectInBucket(bucket, s3HealthCheckObjectKey)
	if err != nil {
		return errors.Wrapf(err, "failed to get object %q in bucket %q for object store %q", s3HealthCheckObjectKey, bucket, c.namespacedName.Name)
	}

	// Compare the old and the existing object
	logger.Debugf("comparing objects hash for object store %q", c.namespacedName.Name)
	oldHash := k8sutil.Hash(s3HealthCheckObjectBody)
	currentHash := k8sutil.Hash(read)
	if currentHash != oldHash {
		// BUG FIX: this previously used errors.Wrapf(err, ...) with a nil
		// err (GetObjectInBucket succeeded at this point), and pkg/errors
		// returns nil for a nil cause — so a content mismatch was silently
		// reported as healthy. Errorf builds a real error.
		return errors.Errorf("wrong file content, old file hash is %q and new one is %q for object store %q", oldHash, currentHash, c.namespacedName.Name)
	}

	return nil
}