#################################################################################################################
# Create an object store with settings for replication in a production environment. A minimum of 3 hosts with
# OSDs is required in this example.
#   kubectl create -f object.yaml
#################################################################################################################
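# After creating the store, a quick way to verify it (a sketch, assuming the names used in this file;
# the label selector app=rook-ceph-rgw matches the pod anti-affinity rule further down):
#   kubectl -n rook-ceph get cephobjectstore my-store
#   kubectl -n rook-ceph get pods -l app=rook-ceph-rgw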
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: my-store
  namespace: rook-ceph # namespace:cluster
spec:
  # The pool spec used to create the metadata pools. Must use replication.
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
      # Disallow setting pool with replica 1, this could lead to data loss without recovery.
      # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
      requireSafeReplicaSize: true
    parameters:
      # Inline compression mode for the metadata pools
      # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
      compression_mode: none
      # gives a hint (%) to Ceph about the expected consumption of the total cluster capacity for this pool
      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
      #target_size_ratio: ".5"
  # The pool spec used to create the data pool. Can use replication or erasure coding
  # (see the commented erasure-coded sketch below this block).
  dataPool:
    failureDomain: host
    replicated:
      size: 3
      # Disallow setting pool with replica 1, this could lead to data loss without recovery.
      # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
      requireSafeReplicaSize: true
    parameters:
      # Inline compression mode for the data pool
      # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
      compression_mode: none
      # gives a hint (%) to Ceph about the expected consumption of the total cluster capacity for this pool
      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
      #target_size_ratio: ".5"
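  # A sketch of an erasure-coded alternative for the data pool (it would replace the replicated block above).
  # The field names follow the Rook pool spec; the chunk counts are illustrative and must fit your OSD/host count:
  # dataPool:
  #   failureDomain: host
  #   erasureCoded:
  #     dataChunks: 2
  #     codingChunks: 1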
  # Whether to preserve metadata and data pools on object store deletion
  preservePoolsOnDelete: false
  # The gateway service configuration
  gateway:
    # A reference to the secret in the rook namespace where the ssl certificate is stored
    # sslCertificateRef:
    # A reference to the secret in the rook namespace where the ca bundle is stored
    # caBundleRef:
    # The port that RGW pods will listen on (http)
    port: 80
    # The port that RGW pods will listen on (https). An ssl certificate is required.
    # securePort: 443
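    # For example, to serve https (a sketch; the secret name "rgw-tls-cert" is illustrative and the
    # secret must already exist in this namespace, as described for sslCertificateRef above):
    # sslCertificateRef: rgw-tls-cert
    # securePort: 443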
    # The number of pods in the rgw deployment
    instances: 1
    # The affinity rules to apply to the rgw deployment.
    placement:
      podAntiAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-rgw
              # topologyKey: */zone can be used to spread RGW across different AZs
              # Use <topologyKey: failure-domain.beta.kubernetes.io/zone> if your Kubernetes cluster is v1.16 or lower
              # Use <topologyKey: topology.kubernetes.io/zone> if your Kubernetes cluster is v1.17 or higher
              topologyKey: kubernetes.io/hostname
      # nodeAffinity:
      #   requiredDuringSchedulingIgnoredDuringExecution:
      #     nodeSelectorTerms:
      #       - matchExpressions:
      #           - key: role
      #             operator: In
      #             values:
      #               - rgw-node
      # topologySpreadConstraints:
      # tolerations:
      #   - key: rgw-node
      #     operator: Exists
      # podAffinity:
      # podAntiAffinity:
    # A key/value list of annotations
    annotations:
    #   key: value
    # A key/value list of labels
    labels:
    #   key: value
    resources:
    # The requests and limits set here allow the object store gateway Pod(s) to use half of one CPU core and 1 gigabyte of memory
    #   limits:
    #     cpu: "500m"
    #     memory: "1024Mi"
    #   requests:
    #     cpu: "500m"
    #     memory: "1024Mi"
    # priorityClassName: my-priority-class
  #zone:
  #  name: zone-a
  # service endpoint healthcheck
  healthCheck:
    bucket:
      disabled: false
      interval: 60s
    # Configure the pod probes for the rgw daemon
    startupProbe:
      disabled: false
    livenessProbe:
      disabled: false
    readinessProbe:
      disabled: false
  # security oriented settings
  # security:
  #   # To enable the KMS configuration properly, don't forget to uncomment the Secret at the end of the file
  #   kms:
  #     # name of the config map containing all the kms connection details
  #     connectionDetails:
  #       KMS_PROVIDER: "vault"
  #       VAULT_ADDR: VAULT_ADDR_CHANGE_ME # e.g. http://vault.my-domain.com:8200
  #       VAULT_BACKEND_PATH: "rook"
  #       VAULT_SECRET_ENGINE: "kv"
  #       VAULT_BACKEND: v2
  #     # name of the secret containing the kms authentication token
  #     tokenSecretName: rook-vault-token
# UNCOMMENT THIS TO ENABLE A KMS CONNECTION
# Also, do not forget to replace both:
#   * ROOK_TOKEN_CHANGE_ME: with a base64 encoded value of the token to use
#   * VAULT_ADDR_CHANGE_ME: with the Vault address
# ---
# apiVersion: v1
# kind: Secret
# metadata:
#   name: rook-vault-token
#   namespace: rook-ceph # namespace:cluster
# data:
#   token: ROOK_TOKEN_CHANGE_ME
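#
# A sketch of producing the base64 value for `token` (the token string below is a placeholder):
#   echo -n "<your-vault-token>" | base64
# Alternatively, create the secret directly instead of filling in the manifest above:
#   kubectl -n rook-ceph create secret generic rook-vault-token --from-literal=token=<your-vault-token>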