nfs.yaml
apiVersion: ceph.rook.io/v1
kind: CephNFS
metadata:
  name: my-nfs
  namespace: rook-ceph # namespace:cluster
spec:
  # For Ceph v15, the rados block is required. It is ignored for Ceph v16.
  rados:
    # RADOS pool where NFS configs are stored.
    # In this example the data pool for the "myfs" filesystem is used.
    # If using the object store example, the data pool would be "my-store.rgw.buckets.data".
    # Note that this has nothing to do with where exported file systems or object stores live.
    pool: myfs-replicated
    # RADOS namespace where NFS client recovery data is stored in the pool.
    namespace: nfs-ns
  # Settings for the NFS server
  server:
    # the number of active NFS servers
    active: 3
    # where to run the NFS server
    placement:
    #  nodeAffinity:
    #    requiredDuringSchedulingIgnoredDuringExecution:
    #      nodeSelectorTerms:
    #      - matchExpressions:
    #        - key: role
    #          operator: In
    #          values:
    #          - mds-node
    #  topologySpreadConstraints:
    #  tolerations:
    #  - key: mds-node
    #    operator: Exists
    #  podAffinity:
    #  podAntiAffinity:
    # A key/value list of annotations
    annotations:
    #  key: value
    # The requests and limits set here allow the ganesha pod(s) to use half of one CPU core and 1 gigabyte of memory
    resources:
    #  limits:
    #    cpu: "500m"
    #    memory: "1024Mi"
    #  requests:
    #    cpu: "500m"
    #    memory: "1024Mi"
    # the priority class to set to influence the scheduler's pod preemption
    #priorityClassName:
    # The logging levels: NIV_NULL | NIV_FATAL | NIV_MAJ | NIV_CRIT | NIV_WARN | NIV_EVENT | NIV_INFO | NIV_DEBUG | NIV_MID_DEBUG | NIV_FULL_DEBUG | NB_LOG_LEVEL
    logLevel: NIV_INFO
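
# A minimal usage sketch, assuming kubectl access to the cluster where the Rook
# operator is running: apply this manifest, then confirm that the requested
# number of NFS server pods starts. The "app=rook-ceph-nfs" label selector is
# an assumption about how Rook labels the ganesha pods and may differ between
# Rook releases.
#
#   kubectl create -f nfs.yaml
#   kubectl -n rook-ceph get pods -l app=rook-ceph-nfs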