#################################################################################################################
# Define the settings for the rook-ceph cluster with common settings for a production stretch cluster on AWS.
# OSDs are created on PVCs from storage class device sets in the two data zones; the third zone runs only the
# arbiter mon. See the documentation for more details on the storage settings available.
# For example, to create the cluster:
#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
#   kubectl create -f cluster-stretched-aws.yaml
#################################################################################################################
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph # namespace:cluster
spec:
  dataDirHostPath: /var/lib/rook
  mon:
    # Five mons must be created for stretch mode
    count: 5
    allowMultiplePerNode: false
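    # With the arbiter zone defined below, the operator is expected to place two mons in each
    # data zone and the fifth mon in the arbiter zone.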
    stretchCluster:
      # The ceph failure domain is extracted from this label, which by default is the zone. The nodes running
      # OSDs must have this label for the OSDs to be configured in the correct topology. For topology labels, see
      # https://rook.github.io/docs/rook/latest/ceph-cluster-crd.html#osd-topology.
      failureDomainLabel: topology.kubernetes.io/zone
      # The sub failure domain is the secondary level at which data is placed to maintain durability and
      # availability. The default is "host", which means each OSD must be on a different node, so at least two
      # nodes are needed per zone. If subFailureDomain is set to "osd", the OSDs may be placed anywhere in the
      # same zone, including on the same node. If it is set to "rack" or some other intermediate failure domain,
      # those labels must also be set on the nodes where the OSDs are started.
      subFailureDomain: host
      zones:
        - name: us-east-2a
          arbiter: true
        - name: us-east-2b
        - name: us-east-2c
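      # On AWS, nodes typically receive this zone label automatically from the cloud provider. To set it by
      # hand on a node (hypothetical node name):
      #   kubectl label node my-node-1 topology.kubernetes.io/zone=us-east-2a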
    volumeClaimTemplate:
      spec:
        storageClassName: gp2
        resources:
          requests:
            storage: 10Gi
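    # Each mon gets its own PVC from this template, i.e. five 10Gi gp2 volumes in this example.
    # To confirm the claims were created and bound:
    #   kubectl -n rook-ceph get pvc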
  mgr:
    count: 2
  cephVersion:
    # Stretch cluster support upstream is only available starting in Ceph Pacific
    image: quay.io/ceph/ceph:v16.2.7
    allowUnsupported: true
  skipUpgradeChecks: false
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  dashboard:
    enabled: true
    ssl: true
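  # The dashboard is exposed by the rook-ceph-mgr-dashboard service, on port 8443 when ssl is enabled.
  # One way to reach it locally, assuming the default service name:
  #   kubectl -n rook-ceph port-forward svc/rook-ceph-mgr-dashboard 8443:8443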
  storage:
    useAllNodes: false
    useAllDevices: false
    deviceFilter: ""
    storageClassDeviceSets:
      - name: set1
        # The number of OSDs to create from this device set
        count: 2
        portable: true
        placement:
          nodeAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
              nodeSelectorTerms:
                - matchExpressions:
                    - key: topology.kubernetes.io/zone
                      operator: In
                      values:
                        - us-east-2b
        preparePlacement:
          nodeAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
              nodeSelectorTerms:
                - matchExpressions:
                    - key: topology.kubernetes.io/zone
                      operator: In
                      values:
                        - us-east-2b
        volumeClaimTemplates:
          - metadata:
              name: data
            spec:
              resources:
                requests:
                  storage: 10Gi
              storageClassName: gp2
              volumeMode: Block
              accessModes:
                - ReadWriteOnce
      - name: set2
        # The number of OSDs to create from this device set
        count: 2
        portable: true
        placement:
          nodeAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
              nodeSelectorTerms:
                - matchExpressions:
                    - key: topology.kubernetes.io/zone
                      operator: In
                      values:
                        - us-east-2c
        preparePlacement:
          nodeAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
              nodeSelectorTerms:
                - matchExpressions:
                    - key: topology.kubernetes.io/zone
                      operator: In
                      values:
                        - us-east-2c
        volumeClaimTemplates:
          - metadata:
              name: data
            spec:
              resources:
                requests:
                  storage: 10Gi
              storageClassName: gp2
              volumeMode: Block
              accessModes:
                - ReadWriteOnce
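    # Once the OSDs are running, the zone/host topology can be checked from the toolbox pod
    # (assuming the toolbox from toolbox.yaml is deployed):
    #   kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd tree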
  placement:
    # The arbiter mon can have its own placement settings, different from those of the other mons.
    # If the arbiter section is not included in the placement, the arbiter uses the same placement
    # settings as the other mons. In this example, the arbiter has a toleration to run on a master node.
    arbiter:
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
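      # On Kubernetes v1.24+ control plane nodes are tainted with node-role.kubernetes.io/control-plane
      # instead of node-role.kubernetes.io/master, so a matching toleration may be needed as well:
      # - key: node-role.kubernetes.io/control-plane
      #   operator: Exists
      #   effect: NoSchedule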
  disruptionManagement:
    managePodBudgets: true
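# After the cluster is up, stretch mode can be checked from the toolbox pod, for example:
#   kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph mon dump
# The mon dump output should list the five mons and, with stretch mode active, the election
# strategy and stretch mode fields.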