-
Notifications
You must be signed in to change notification settings - Fork 2.6k
/
validate.go
215 lines (184 loc) · 6.69 KB
/
validate.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
/*
Copyright 2020 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package pool to manage a rook pool.
package pool
import (
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/daemon/ceph/client"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
)
// ValidatePool Validate the pool arguments
func ValidatePool(context *clusterd.Context, clusterInfo *client.ClusterInfo, clusterSpec *cephv1.ClusterSpec, p *cephv1.CephBlockPool) error {
if p.Name == "" {
return errors.New("missing name")
}
if p.Namespace == "" {
return errors.New("missing namespace")
}
if err := ValidatePoolSpec(context, clusterInfo, clusterSpec, &p.Spec); err != nil {
return err
}
return nil
}
// ValidatePoolSpec validates the Ceph block pool spec CR
func ValidatePoolSpec(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, clusterSpec *cephv1.ClusterSpec, p *cephv1.PoolSpec) error {
if p.IsHybridStoragePool() {
err := validateDeviceClasses(context, clusterInfo, p)
if err != nil {
return errors.Wrap(err, "failed to validate device classes for hybrid storage pool spec")
}
}
if p.IsReplicated() && p.IsErasureCoded() {
return errors.New("both replication and erasure code settings cannot be specified")
}
if p.FailureDomain != "" && p.Replicated.SubFailureDomain != "" {
if p.FailureDomain == p.Replicated.SubFailureDomain {
return errors.New("failure and subfailure domain cannot be identical")
}
}
// validate pools for stretch clusters
if clusterSpec.IsStretchCluster() {
if p.IsReplicated() {
if p.Replicated.Size != 4 {
return errors.New("pools in a stretch cluster must have replication size 4")
}
}
if p.IsErasureCoded() {
return errors.New("erasure coded pools are not supported in stretch clusters")
}
}
var crush client.CrushMap
var err error
if p.FailureDomain != "" || p.CrushRoot != "" {
crush, err = client.GetCrushMap(context, clusterInfo)
if err != nil {
return errors.Wrap(err, "failed to get crush map")
}
}
// validate the failure domain if specified
if p.FailureDomain != "" {
found := false
for _, t := range crush.Types {
if t.Name == p.FailureDomain {
found = true
break
}
}
if !found {
return errors.Errorf("unrecognized failure domain %s", p.FailureDomain)
}
}
// validate the crush root if specified
if p.CrushRoot != "" {
found := false
for _, t := range crush.Buckets {
if t.Name == p.CrushRoot {
found = true
break
}
}
if !found {
return errors.Errorf("unrecognized crush root %s", p.CrushRoot)
}
}
// validate the crush subdomain if specified
if p.Replicated.SubFailureDomain != "" {
found := false
for _, t := range crush.Types {
if t.Name == p.Replicated.SubFailureDomain {
found = true
break
}
}
if !found {
return errors.Errorf("unrecognized crush sub domain %s", p.Replicated.SubFailureDomain)
}
}
// validate pool replica size
if p.IsReplicated() {
if p.Replicated.Size == 1 && p.Replicated.RequireSafeReplicaSize {
return errors.Errorf("error pool size is %d and requireSafeReplicaSize is %t, must be false", p.Replicated.Size, p.Replicated.RequireSafeReplicaSize)
}
if p.Replicated.Size <= p.Replicated.ReplicasPerFailureDomain {
return errors.Errorf("error pool size is %d and replicasPerFailureDomain is %d, size must be greater", p.Replicated.Size, p.Replicated.ReplicasPerFailureDomain)
}
if p.Replicated.ReplicasPerFailureDomain != 0 && p.Replicated.Size%p.Replicated.ReplicasPerFailureDomain != 0 {
return errors.Errorf("error replicasPerFailureDomain is %d must be a factor of the replica count %d", p.Replicated.ReplicasPerFailureDomain, p.Replicated.Size)
}
}
// validate pool compression mode if specified
if p.CompressionMode != "" {
logger.Warning("compressionMode is DEPRECATED, use Parameters instead")
}
// Test the same for Parameters
if p.Parameters != nil {
compression, ok := p.Parameters[client.CompressionModeProperty]
if ok && compression != "" {
switch compression {
case "none", "passive", "aggressive", "force":
break
default:
return errors.Errorf("failed to validate pool spec unknown compression mode %q", compression)
}
}
}
// Validate mirroring settings
if p.Mirroring.Enabled {
switch p.Mirroring.Mode {
case "image", "pool":
break
default:
return errors.Errorf("unrecognized mirroring mode %q. only 'image and 'pool' are supported", p.Mirroring.Mode)
}
if p.Mirroring.SnapshotSchedulesEnabled() {
for _, snapSchedule := range p.Mirroring.SnapshotSchedules {
if snapSchedule.Interval == "" && snapSchedule.StartTime != "" {
return errors.New("schedule interval cannot be empty if start time is specified")
}
}
}
}
if !p.Mirroring.Enabled && p.Mirroring.SnapshotSchedulesEnabled() {
logger.Warning("mirroring must be enabled to configure snapshot scheduling")
}
return nil
}
// validateDeviceClasses validates the primary and secondary device classes in the HybridStorageSpec
func validateDeviceClasses(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo,
p *cephv1.PoolSpec) error {
primaryDeviceClass := p.Replicated.HybridStorage.PrimaryDeviceClass
secondaryDeviceClass := p.Replicated.HybridStorage.SecondaryDeviceClass
err := validateDeviceClassOSDs(context, clusterInfo, primaryDeviceClass)
if err != nil {
return errors.Wrapf(err, "failed to validate primary device class %q", primaryDeviceClass)
}
err = validateDeviceClassOSDs(context, clusterInfo, secondaryDeviceClass)
if err != nil {
return errors.Wrapf(err, "failed to validate secondary device class %q", secondaryDeviceClass)
}
return nil
}
// validateDeviceClassOSDs validates that the device class should have atleast one OSD
func validateDeviceClassOSDs(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo,
deviceClassName string) error {
deviceClassOSDs, err := cephclient.GetDeviceClassOSDs(context, clusterInfo, deviceClassName)
if err != nil {
return errors.Wrapf(err, "failed to get osds for the device class %q", deviceClassName)
}
if len(deviceClassOSDs) == 0 {
return errors.Errorf("no osds available for the device class %q", deviceClassName)
}
return nil
}