diff --git a/build/rbac/keep-rbac-yaml.sh b/build/rbac/keep-rbac-yaml.sh
index 31ddb84d6ae89..3b53d3378236e 100755
--- a/build/rbac/keep-rbac-yaml.sh
+++ b/build/rbac/keep-rbac-yaml.sh
@@ -27,7 +27,9 @@ $YQ eval '
   select(.kind == "Role"),
   select(.kind == "RoleBinding")
 ' - | # select all RBAC resource Kinds
-$YQ eval 'del(.metadata.labels.chart)' - | # remove the 'chart' label that only applies to Helm-managed resources
+$YQ eval 'del(.metadata.labels."helm.sh/chart")' - | # remove the 'helm.sh/chart' label that only applies to Helm-managed resources
+$YQ eval 'del(.metadata.labels."app.kubernetes.io/managed-by")' - | # remove the 'app.kubernetes.io/managed-by' label that only applies to Helm-managed resources
+$YQ eval 'del(.metadata.labels."app.kubernetes.io/created-by")' - | # remove the 'app.kubernetes.io/created-by' label that only applies to Helm-managed resources
 sed '/^$/d' | # remove empty lines caused by yq's display of header/footer comments
 sed '/^# Source: /d' | # helm adds '# Source: ' comments atop of each yaml doc. 
Strip these $YQ eval --split-exp '.kind + " " + .metadata.name + " "' - # split into files by .yaml diff --git a/build/rbac/rbac.yaml b/build/rbac/rbac.yaml index 2095060f2a665..89a6d931b95f1 100644 --- a/build/rbac/rbac.yaml +++ b/build/rbac/rbac.yaml @@ -1,4 +1,211 @@ --- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-nodeplugin +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-plugin-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: cephfs-csi-nodeplugin + apiGroup: rbac.authorization.k8s.io +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-provisioner-role +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: cephfs-external-provisioner-runner + apiGroup: rbac.authorization.k8s.io +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-nodeplugin +roleRef: + kind: ClusterRole + name: rbd-csi-nodeplugin + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-plugin-sa + namespace: rook-ceph # namespace:operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-provisioner-role +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: rbd-external-provisioner-runner + apiGroup: rbac.authorization.k8s.io +--- +# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-global + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + 
helm.sh/chart: "rook-ceph-0.0.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-global +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-mgr-cluster +subjects: + - kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +--- +kind: ClusterRoleBinding +# Give Rook-Ceph Operator permissions to provision ObjectBuckets in response to ObjectBucketClaims. +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-object-bucket +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-object-bucket +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-osd +subjects: + - kind: ServiceAccount + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-ceph-system-psp + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'psp:rook' +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +kind: ClusterRoleBinding 
+apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-system + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-system +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-cephfs-plugin-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'psp:rook' +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-plugin-sa + namespace: rook-ceph # namespace:operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-cephfs-provisioner-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'psp:rook' +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-rbd-plugin-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'psp:rook' +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-plugin-sa + namespace: rook-ceph # namespace:operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-rbd-provisioner-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'psp:rook' +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +--- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -75,6 +282,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + 
app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" rules: - apiGroups: - policy @@ -186,6 +397,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" rules: - apiGroups: - "" @@ -218,6 +433,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" rules: - apiGroups: - "" @@ -343,6 +562,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" rules: - apiGroups: - "" @@ -399,6 +622,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" rules: - apiGroups: [""] resources: ["secrets", "configmaps"] @@ -464,6 +691,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" rules: # Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint. # However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...] 
@@ -476,277 +707,284 @@ rules: resources: ["pods/exec"] verbs: ["create"] --- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-nodeplugin -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-plugin-sa - namespace: rook-ceph # namespace:operator -roleRef: - kind: ClusterRole - name: cephfs-csi-nodeplugin - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-provisioner-role -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: rook-ceph # namespace:operator -roleRef: - kind: ClusterRole - name: cephfs-external-provisioner-runner - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-nodeplugin -roleRef: - kind: ClusterRole - name: rbd-csi-nodeplugin - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-plugin-sa - namespace: rook-ceph # namespace:operator ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 +# We expect most Kubernetes teams to follow the Kubernetes docs and have these PSPs. +# * privileged (for kube-system namespace) +# * restricted (for all logged in users) +# +# PSPs are applied based on the first match alphabetically. `rook-ceph-operator` comes after +# `restricted` alphabetically, so we name this `00-rook-privileged`, so it stays somewhere +# close to the top and so `rook-system` gets the intended PSP. This may need to be renamed in +# environments with other `00`-prefixed PSPs. 
+# +# More on PSP ordering: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-order +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy metadata: - name: rbd-csi-provisioner-role -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: rook-ceph # namespace:operator -roleRef: - kind: ClusterRole - name: rbd-external-provisioner-runner - apiGroup: rbac.authorization.k8s.io + name: 00-rook-privileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + privileged: true + allowedCapabilities: + # required by CSI + - SYS_ADMIN + fsGroup: + rule: RunAsAny + # runAsUser, supplementalGroups - Rook needs to run some pods as root + # Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + # seLinux - seLinux context is unknown ahead of time; set if this is well-known + seLinux: + rule: RunAsAny + volumes: + # recommended minimum set + - configMap + - downwardAPI + - emptyDir + - persistentVolumeClaim + - secret + - projected + # required for Rook + - hostPath + # allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known + # allowedHostPaths: + # - pathPrefix: "/run/udev" # for OSD prep + # readOnly: false + # - pathPrefix: "/dev" # for OSD prep + # readOnly: false + # - pathPrefix: "/var/lib/rook" # or whatever the dataDirHostPath value is set to + # readOnly: false + # Ceph requires host IPC for setting up encrypted devices + hostIPC: true + # Ceph OSDs need to share the same PID namespace + hostPID: true + # hostNetwork can be set to 'false' if host networking isn't used + hostNetwork: true + hostPorts: + # Ceph messenger protocol v1 + - min: 6789 + max: 6790 # <- support old default port + # Ceph messenger protocol v2 + - min: 3300 + max: 3300 + # Ceph RADOS 
ports for OSDs, MDSes + - min: 6800 + max: 7300 + # # Ceph dashboard port HTTP (not recommended) + # - min: 7000 + # max: 7000 + # Ceph dashboard port HTTPS + - min: 8443 + max: 8443 + # Ceph mgr Prometheus Metrics + - min: 9283 + max: 9283 --- -# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes -kind: ClusterRoleBinding +kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: rook-ceph-global - labels: - operator: rook - storage-backend: ceph + name: cephfs-csi-provisioner-role-cfg + namespace: rook-ceph # namespace:operator +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator roleRef: + kind: Role + name: cephfs-external-provisioner-cfg apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-global +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-provisioner-role-cfg + namespace: rook-ceph # namespace:operator subjects: - kind: ServiceAccount - name: rook-ceph-system + name: rook-csi-rbd-provisioner-sa namespace: rook-ceph # namespace:operator +roleRef: + kind: Role + name: rbd-external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io --- -# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules -kind: ClusterRoleBinding +# Allow the operator to create resources in this cluster's namespace +kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: rook-ceph-mgr-cluster + name: rook-ceph-cluster-mgmt + namespace: rook-ceph # namespace:cluster roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: rook-ceph-mgr-cluster + name: rook-ceph-cluster-mgmt subjects: - kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster + name: rook-ceph-system + namespace: rook-ceph # namespace:operator --- -kind: ClusterRoleBinding -# Give Rook-Ceph Operator permissions to provision 
ObjectBuckets in response to ObjectBucketClaims. apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding metadata: - name: rook-ceph-object-bucket + name: rook-ceph-cmd-reporter-psp + namespace: rook-ceph # namespace:cluster roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: rook-ceph-object-bucket + name: psp:rook subjects: - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster --- -# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location -kind: ClusterRoleBinding +kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: rook-ceph-osd + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-osd + kind: Role + name: rook-ceph-cmd-reporter subjects: - kind: ServiceAccount - name: rook-ceph-osd + name: rook-ceph-cmd-reporter namespace: rook-ceph # namespace:cluster --- -kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding metadata: - name: rook-ceph-system + name: rook-ceph-default-psp + namespace: rook-ceph # namespace:cluster labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: rook-ceph-system + name: psp:rook subjects: - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator + name: default + namespace: rook-ceph # namespace:cluster --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: - name: rook-ceph-system-psp - labels: - operator: rook - storage-backend: ceph + name: rook-ceph-mgr-psp + namespace: rook-ceph # namespace:cluster roleRef: 
apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: 'psp:rook' + name: psp:rook subjects: - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster --- +# Allow the ceph mgr to access resources in the Rook operator namespace necessary for mgr modules +kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding metadata: - name: rook-csi-cephfs-plugin-sa-psp + name: rook-ceph-mgr-system + namespace: rook-ceph # namespace:operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: 'psp:rook' + name: rook-ceph-mgr-system subjects: - kind: ServiceAccount - name: rook-csi-cephfs-plugin-sa - namespace: rook-ceph # namespace:operator + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster --- +# Allow the ceph mgr to access resources scoped to the CephCluster namespace necessary for mgr modules +kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding metadata: - name: rook-csi-cephfs-provisioner-sa-psp + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' + kind: Role + name: rook-ceph-mgr subjects: - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: rook-ceph # namespace:operator + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: - name: rook-csi-rbd-plugin-sa-psp + name: rook-ceph-osd-psp + namespace: rook-ceph # namespace:cluster roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: 'psp:rook' + name: psp:rook subjects: - kind: ServiceAccount - name: rook-csi-rbd-plugin-sa - namespace: rook-ceph # namespace:operator + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster --- +# Allow the osd pods in this namespace to work with 
configmaps +kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding metadata: - name: rook-csi-rbd-provisioner-sa-psp + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 'psp:rook' + kind: Role + name: rook-ceph-osd subjects: - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: rook-ceph # namespace:operator + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster --- -# We expect most Kubernetes teams to follow the Kubernetes docs and have these PSPs. -# * privileged (for kube-system namespace) -# * restricted (for all logged in users) -# -# PSPs are applied based on the first match alphabetically. `rook-ceph-operator` comes after -# `restricted` alphabetically, so we name this `00-rook-privileged`, so it stays somewhere -# close to the top and so `rook-system` gets the intended PSP. This may need to be renamed in -# environments with other `00`-prefixed PSPs. 
-# -# More on PSP ordering: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-order -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy +# Allow the osd purge job to run in this namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: 00-rook-privileged - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' -spec: - privileged: true - allowedCapabilities: - # required by CSI - - SYS_ADMIN - fsGroup: - rule: RunAsAny - # runAsUser, supplementalGroups - Rook needs to run some pods as root - # Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time - runAsUser: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - # seLinux - seLinux context is unknown ahead of time; set if this is well-known - seLinux: - rule: RunAsAny - volumes: - # recommended minimum set - - configMap - - downwardAPI - - emptyDir - - persistentVolumeClaim - - secret - - projected - # required for Rook - - hostPath - # allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known - # allowedHostPaths: - # - pathPrefix: "/run/udev" # for OSD prep - # readOnly: false - # - pathPrefix: "/dev" # for OSD prep - # readOnly: false - # - pathPrefix: "/var/lib/rook" # or whatever the dataDirHostPath value is set to - # readOnly: false - # Ceph requires host IPC for setting up encrypted devices - hostIPC: true - # Ceph OSDs need to share the same PID namespace - hostPID: true - # hostNetwork can be set to 'false' if host networking isn't used - hostNetwork: true - hostPorts: - # Ceph messenger protocol v1 - - min: 6789 - max: 6790 # <- support old default port - # Ceph messenger protocol v2 - - min: 3300 - max: 3300 - # Ceph RADOS ports for OSDs, MDSes - - min: 6800 - max: 7300 - # # Ceph dashboard port HTTP (not recommended) - # - min: 7000 - # max: 7000 - # Ceph dashboard 
port HTTPS - - min: 8443 - max: 8443 - # Ceph mgr Prometheus Metrics - - min: 9283 - max: 9283 + name: rook-ceph-purge-osd + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-purge-osd +subjects: + - kind: ServiceAccount + name: rook-ceph-purge-osd + namespace: rook-ceph # namespace:cluster +--- +# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-system + namespace: rook-ceph # namespace:operator + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-system +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 @@ -892,6 +1130,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" rules: - apiGroups: - "" @@ -940,200 +1182,6 @@ rules: verbs: - delete --- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: cephfs-csi-provisioner-role-cfg - namespace: rook-ceph # namespace:operator -subjects: - - kind: ServiceAccount - name: rook-csi-cephfs-provisioner-sa - namespace: rook-ceph # namespace:operator -roleRef: - kind: Role - name: cephfs-external-provisioner-cfg - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rbd-csi-provisioner-role-cfg - namespace: rook-ceph # namespace:operator -subjects: - - kind: ServiceAccount - name: rook-csi-rbd-provisioner-sa - namespace: 
rook-ceph # namespace:operator -roleRef: - kind: Role - name: rbd-external-provisioner-cfg - apiGroup: rbac.authorization.k8s.io ---- -# Allow the operator to create resources in this cluster's namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cluster-mgmt - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-cluster-mgmt -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-cmd-reporter - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-cmd-reporter -subjects: - - kind: ServiceAccount - name: rook-ceph-cmd-reporter - namespace: rook-ceph # namespace:cluster ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-cmd-reporter-psp - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: - - kind: ServiceAccount - name: rook-ceph-cmd-reporter - namespace: rook-ceph # namespace:cluster ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-default-psp - namespace: rook-ceph # namespace:cluster - labels: - operator: rook - storage-backend: ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: - - kind: ServiceAccount - name: default - namespace: rook-ceph # namespace:cluster ---- -# Allow the ceph mgr to access resources scoped to the CephCluster namespace necessary for mgr modules -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-mgr -subjects: - - kind: 
ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-mgr-psp - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: - - kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster ---- -# Allow the ceph mgr to access resources in the Rook operator namespace necessary for mgr modules -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-mgr-system - namespace: rook-ceph # namespace:operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: rook-ceph-mgr-system -subjects: - - kind: ServiceAccount - name: rook-ceph-mgr - namespace: rook-ceph # namespace:cluster ---- -# Allow the osd pods in this namespace to work with configmaps -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-osd - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-osd -subjects: - - kind: ServiceAccount - name: rook-ceph-osd - namespace: rook-ceph # namespace:cluster ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rook-ceph-osd-psp - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:rook -subjects: - - kind: ServiceAccount - name: rook-ceph-osd - namespace: rook-ceph # namespace:cluster ---- -# Allow the osd purge job to run in this namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-purge-osd - namespace: rook-ceph # namespace:cluster -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-purge-osd -subjects: - - kind: ServiceAccount - name: rook-ceph-purge-osd - namespace: rook-ceph # namespace:cluster ---- -# Grant the 
operator, agent, and discovery agents access to resources in the rook-ceph-system namespace -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: rook-ceph-system - namespace: rook-ceph # namespace:operator - labels: - operator: rook - storage-backend: ceph -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rook-ceph-system -subjects: - - kind: ServiceAccount - name: rook-ceph-system - namespace: rook-ceph # namespace:operator ---- # Service account for the job that reports the Ceph version in an image apiVersion: v1 kind: ServiceAccount @@ -1143,6 +1191,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" --- # Service account for Ceph mgrs apiVersion: v1 @@ -1153,6 +1205,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" --- # Service account for Ceph OSDs apiVersion: v1 @@ -1164,6 +1220,10 @@ metadata: operator: rook storage-backend: ceph i-am-a-new-label: delete-me + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" --- # Service account for job that purges OSDs from a Rook-Ceph cluster apiVersion: v1 @@ -1181,6 +1241,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator + app.kubernetes.io/managed-by: helm + app.kubernetes.io/created-by: helm + helm.sh/chart: "rook-ceph-0.0.1" --- # Service account for the CephFS CSI driver apiVersion: v1 diff --git a/deploy/charts/library/templates/_cluster-psp.tpl b/deploy/charts/library/templates/_cluster-psp.tpl index 072c9654b7737..1918f7774e86d 100644 --- 
a/deploy/charts/library/templates/_cluster-psp.tpl +++ b/deploy/charts/library/templates/_cluster-psp.tpl @@ -10,6 +10,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/deploy/charts/library/templates/_cluster-serviceaccount.tpl b/deploy/charts/library/templates/_cluster-serviceaccount.tpl index 804556b70357a..8ca0ec806a1f7 100644 --- a/deploy/charts/library/templates/_cluster-serviceaccount.tpl +++ b/deploy/charts/library/templates/_cluster-serviceaccount.tpl @@ -12,6 +12,7 @@ metadata: operator: rook storage-backend: ceph i-am-a-new-label: delete-me + {{- include "library.rook-ceph.labels" . | nindent 4 }} {{ include "library.imagePullSecrets" . }} --- # Service account for Ceph mgrs @@ -23,6 +24,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} {{ include "library.imagePullSecrets" . }} --- # Service account for the job that reports the Ceph version in an image @@ -34,6 +36,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} {{ include "library.imagePullSecrets" . 
}} --- # Service account for job that purges OSDs from a Rook-Ceph cluster diff --git a/deploy/charts/library/templates/_recommended-labels.tpl b/deploy/charts/library/templates/_recommended-labels.tpl new file mode 100644 index 0000000000000..906755c67d44a --- /dev/null +++ b/deploy/charts/library/templates/_recommended-labels.tpl @@ -0,0 +1,9 @@ +{{/* +Common labels +*/}} +{{- define "library.rook-ceph.labels" -}} +app.kubernetes.io/part-of: rook-ceph-operator +app.kubernetes.io/managed-by: helm +app.kubernetes.io/created-by: helm +helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} diff --git a/deploy/charts/rook-ceph/templates/clusterrole.yaml b/deploy/charts/rook-ceph/templates/clusterrole.yaml index 83ba8bc9f86cd..30312a9228fcc 100644 --- a/deploy/charts/rook-ceph/templates/clusterrole.yaml +++ b/deploy/charts/rook-ceph/templates/clusterrole.yaml @@ -6,6 +6,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: # Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint. # However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...] @@ -26,6 +27,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: - apiGroups: - "" @@ -58,6 +60,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: - apiGroups: - "" @@ -183,6 +186,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: - apiGroups: - "" @@ -239,6 +243,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . 
| nindent 4 }} rules: - apiGroups: [""] resources: ["secrets", "configmaps"] diff --git a/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml b/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml index 732ff20e15d70..1e484b25bada5 100644 --- a/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml +++ b/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml @@ -6,6 +6,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -23,7 +24,7 @@ metadata: labels: operator: rook storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- include "library.rook-ceph.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/deploy/charts/rook-ceph/templates/deployment.yaml b/deploy/charts/rook-ceph/templates/deployment.yaml index 9eb1c46077da1..3e0d5799c78a5 100644 --- a/deploy/charts/rook-ceph/templates/deployment.yaml +++ b/deploy/charts/rook-ceph/templates/deployment.yaml @@ -5,7 +5,7 @@ metadata: labels: operator: rook storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- include "library.rook-ceph.labels" . 
| nindent 4 }} spec: replicas: 1 selector: @@ -15,7 +15,7 @@ spec: metadata: labels: app: rook-ceph-operator - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" {{- if .Values.annotations }} annotations: {{ toYaml .Values.annotations | indent 8 }} diff --git a/deploy/charts/rook-ceph/templates/psp.yaml b/deploy/charts/rook-ceph/templates/psp.yaml index 1b46af4888543..e609c5f53b53e 100644 --- a/deploy/charts/rook-ceph/templates/psp.yaml +++ b/deploy/charts/rook-ceph/templates/psp.yaml @@ -84,7 +84,7 @@ metadata: labels: operator: rook storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: - apiGroups: - policy @@ -102,7 +102,7 @@ metadata: labels: operator: rook storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- include "library.rook-ceph.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/deploy/charts/rook-ceph/templates/role.yaml b/deploy/charts/rook-ceph/templates/role.yaml index e6185c43d9072..74cefb7cc44bf 100644 --- a/deploy/charts/rook-ceph/templates/role.yaml +++ b/deploy/charts/rook-ceph/templates/role.yaml @@ -8,6 +8,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: - apiGroups: - "" diff --git a/deploy/charts/rook-ceph/templates/rolebinding.yaml b/deploy/charts/rook-ceph/templates/rolebinding.yaml index 3b5eaa1aa9bcd..4025df5b65760 100644 --- a/deploy/charts/rook-ceph/templates/rolebinding.yaml +++ b/deploy/charts/rook-ceph/templates/rolebinding.yaml @@ -8,6 +8,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . 
| nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/deploy/charts/rook-ceph/templates/serviceaccount.yaml b/deploy/charts/rook-ceph/templates/serviceaccount.yaml index c6562530b9413..aceeb2a0a6de6 100644 --- a/deploy/charts/rook-ceph/templates/serviceaccount.yaml +++ b/deploy/charts/rook-ceph/templates/serviceaccount.yaml @@ -7,7 +7,7 @@ metadata: labels: operator: rook storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- include "library.rook-ceph.labels" . | nindent 4 }} {{ template "library.imagePullSecrets" . }} --- # Service account for the CephFS CSI driver diff --git a/deploy/examples/common.yaml b/deploy/examples/common.yaml index 54dbce9937c3b..14fcabaf1e7c1 100644 --- a/deploy/examples/common.yaml +++ b/deploy/examples/common.yaml @@ -41,6 +41,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - "" @@ -70,6 +74,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: # Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint. # However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...] 
@@ -91,6 +99,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - "" @@ -137,6 +149,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - "" @@ -262,6 +278,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - "" @@ -303,6 +323,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: [""] resources: ["secrets", "configmaps"] @@ -359,6 +383,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator # imagePullSecrets: # - name: my-registry-secret @@ -374,6 +402,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -390,6 +422,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -407,6 
+443,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml index 6ac93d0f81d2e..821d36075d655 100644 --- a/deploy/examples/operator-openshift.yaml +++ b/deploy/examples/operator-openshift.yaml @@ -424,6 +424,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: rook-ceph-operator + app.kubernetes.io/part-of: rook-ceph-operator spec: selector: matchLabels: diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml index 6e2512236e196..c4289df0568a9 100644 --- a/deploy/examples/operator.yaml +++ b/deploy/examples/operator.yaml @@ -341,6 +341,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: rook-ceph-operator + app.kubernetes.io/part-of: rook-ceph-operator spec: selector: matchLabels: diff --git a/design/ceph/interacting-with-rook-resources.md b/design/ceph/interacting-with-rook-resources.md new file mode 100644 index 0000000000000..016eb65de50e4 --- /dev/null +++ b/design/ceph/interacting-with-rook-resources.md @@ -0,0 +1,43 @@ +# Labels added to Rook-Ceph resources + +[Recommended Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/) are a common set of labels that allows tools to work interoperably, describing objects in a common manner that all tools can understand. + +## Labels added to all Resources created by Rook + +* `app.kubernetes.io/name`: Is the name of the binary running in a container(combination of "ceph-"+daemonType). 
+
+* `app.kubernetes.io/instance`: A unique name identifying the instance of an application. Due to the nature of how resources are named in Rook, this is guaranteed to be unique per CephCluster namespace but not unique within the entire Kubernetes cluster.
+
+* `app.kubernetes.io/component`: This is populated with the fully qualified (plural) resource type of the resource controlling this application. For example, `cephclusters.ceph.rook.io` or `cephfilesystems.ceph.rook.io`.
+
+* `app.kubernetes.io/part-of`: This is populated with the Name of the resource controlling this application.
+
+* `app.kubernetes.io/managed-by`: `rook-ceph-operator` is the tool being used to manage the operation of the application.
+
+* `app.kubernetes.io/created-by`: `rook-ceph-operator` is the controller/user who created this resource.
+
+* `rook.io/operator-namespace`: The namespace in which the rook-ceph operator is running.
+
+An example of the recommended labels on a Ceph mon with ID `a` looks like:
+```
+ app.kubernetes.io/name : "ceph-mon"
+ app.kubernetes.io/instance : "a"
+ app.kubernetes.io/component : "cephclusters.ceph.rook.io"
+ app.kubernetes.io/part-of : "rook-ceph"
+ app.kubernetes.io/managed-by : "rook-ceph-operator"
+ app.kubernetes.io/created-by : "rook-ceph-operator"
+ rook.io/operator-namespace : "rook-ceph"
+```
+
+Another example, for a CephFilesystem MDS with instance `myfs-a`:
+```
+ app.kubernetes.io/name : "ceph-mds"
+ app.kubernetes.io/instance : "myfs-a"
+ app.kubernetes.io/component : "cephfilesystems.ceph.rook.io"
+ app.kubernetes.io/part-of : "myfs"
+ app.kubernetes.io/managed-by : "rook-ceph-operator"
+ app.kubernetes.io/created-by : "rook-ceph-operator"
+ rook.io/operator-namespace : "rook-ceph"
+```
+
+**NOTE** : A totally unique string for an application can be built up from (a) app.kubernetes.io/component, (b) app.kubernetes.io/part-of, (c) the resource's namespace, (d) app.kubernetes.io/name, and (e) app.kubernetes.io/instance fields.
For the example above, we could join those fields with underscore connectors like this: cephclusters.ceph.rook.io_rook-ceph_rook-ceph_ceph-mon_a. Note that this full spec can easily exceed the 64-character limit imposed on Kubernetes labels. \ No newline at end of file diff --git a/pkg/operator/ceph/cluster/mgr/spec.go b/pkg/operator/ceph/cluster/mgr/spec.go index 439ccc2418670..9896d25b5239b 100644 --- a/pkg/operator/ceph/cluster/mgr/spec.go +++ b/pkg/operator/ceph/cluster/mgr/spec.go @@ -51,7 +51,7 @@ func (c *Cluster) makeDeployment(mgrConfig *mgrConfig) (*apps.Deployment, error) podSpec := v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: mgrConfig.ResourceName, - Labels: c.getPodLabels(mgrConfig.DaemonID, true), + Labels: c.getPodLabels(mgrConfig, true), }, Spec: v1.PodSpec{ InitContainers: []v1.Container{ @@ -111,11 +111,11 @@ func (c *Cluster) makeDeployment(mgrConfig *mgrConfig) (*apps.Deployment, error) ObjectMeta: metav1.ObjectMeta{ Name: mgrConfig.ResourceName, Namespace: c.clusterInfo.Namespace, - Labels: c.getPodLabels(mgrConfig.DaemonID, true), + Labels: c.getPodLabels(mgrConfig, true), }, Spec: apps.DeploymentSpec{ Selector: &metav1.LabelSelector{ - MatchLabels: c.getPodLabels(mgrConfig.DaemonID, false), + MatchLabels: c.getPodLabels(mgrConfig, false), }, Template: podSpec, Replicas: &replicas, @@ -318,10 +318,10 @@ func (c *Cluster) makeDashboardService(name, activeDaemon string) (*v1.Service, return svc, nil } -func (c *Cluster) getPodLabels(daemonName string, includeNewLabels bool) map[string]string { - labels := controller.CephDaemonAppLabels(AppName, c.clusterInfo.Namespace, "mgr", daemonName, includeNewLabels) +func (c *Cluster) getPodLabels(mgrConfig *mgrConfig, includeNewLabels bool) map[string]string { + labels := controller.CephDaemonAppLabels(AppName, c.clusterInfo.Namespace, config.MgrType, mgrConfig.DaemonID, c.clusterInfo.NamespacedName().Name, "cephclusters.ceph.rook.io", includeNewLabels) // leave "instance" key for legacy 
usage - labels["instance"] = daemonName + labels["instance"] = mgrConfig.DaemonID return labels } diff --git a/pkg/operator/ceph/cluster/mgr/spec_test.go b/pkg/operator/ceph/cluster/mgr/spec_test.go index 86ef477c0abd9..ab2c2e1e43a8f 100644 --- a/pkg/operator/ceph/cluster/mgr/spec_test.go +++ b/pkg/operator/ceph/cluster/mgr/spec_test.go @@ -68,7 +68,7 @@ func TestPodSpec(t *testing.T) { // Deployment should have Ceph labels test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.MgrType, "a", AppName, "ns") + config.MgrType, "a", AppName, "ns", "test", "cephclusters.ceph.rook.io", "ceph-mgr") podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) podTemplate.Spec().Containers().RequireAdditionalEnvVars( @@ -76,7 +76,7 @@ func TestPodSpec(t *testing.T) { "ROOK_CEPH_CLUSTER_CRD_NAME") podTemplate.RunFullSuite(config.MgrType, "a", AppName, "ns", "quay.io/ceph/ceph:myceph", "200", "100", "500", "250", /* resources */ - "my-priority-class") + "my-priority-class", "test", "cephclusters.ceph.rook.io", "ceph-mgr") assert.Equal(t, 2, len(d.Spec.Template.Annotations)) assert.Equal(t, 1, len(d.Spec.Template.Spec.Containers)) assert.Equal(t, 5, len(d.Spec.Template.Spec.Containers[0].VolumeMounts)) diff --git a/pkg/operator/ceph/cluster/mon/spec.go b/pkg/operator/ceph/cluster/mon/spec.go index 6316560207d7b..657584197b5b3 100644 --- a/pkg/operator/ceph/cluster/mon/spec.go +++ b/pkg/operator/ceph/cluster/mon/spec.go @@ -43,7 +43,7 @@ const ( func (c *Cluster) getLabels(monConfig *monConfig, canary, includeNewLabels bool) map[string]string { // Mons have a service for each mon, so the additional pod data is relevant for its services // Use pod labels to keep "mon: id" for legacy - labels := controller.CephDaemonAppLabels(AppName, c.Namespace, "mon", monConfig.DaemonName, includeNewLabels) + labels := controller.CephDaemonAppLabels(AppName, c.Namespace, config.MonType, monConfig.DaemonName, c.ClusterInfo.NamespacedName().Name, 
"cephclusters.ceph.rook.io", includeNewLabels) // Add "mon_cluster: " for legacy labels[monClusterAttr] = c.Namespace if canary { diff --git a/pkg/operator/ceph/cluster/mon/spec_test.go b/pkg/operator/ceph/cluster/mon/spec_test.go index 263c2a57491db..2b213e2be6f87 100644 --- a/pkg/operator/ceph/cluster/mon/spec_test.go +++ b/pkg/operator/ceph/cluster/mon/spec_test.go @@ -82,12 +82,12 @@ func testPodSpec(t *testing.T, monID string, pvc bool) { // Deployment should have Ceph labels test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.MonType, monID, AppName, "ns") + config.MonType, monID, AppName, "ns", "default", "cephclusters.ceph.rook.io", "ceph-mon") podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) podTemplate.RunFullSuite(config.MonType, monID, AppName, "ns", "quay.io/ceph/ceph:myceph", "200", "100", "1337", "500", /* resources */ - "my-priority-class") + "my-priority-class", "default", "cephclusters.ceph.rook.io", "ceph-mon") } func TestDeploymentPVCSpec(t *testing.T) { diff --git a/pkg/operator/ceph/cluster/osd/labels.go b/pkg/operator/ceph/cluster/osd/labels.go index 6cdde11043b5d..49c87e0cab2d5 100644 --- a/pkg/operator/ceph/cluster/osd/labels.go +++ b/pkg/operator/ceph/cluster/osd/labels.go @@ -21,6 +21,7 @@ import ( "strconv" "strings" + "github.com/rook/rook/pkg/operator/ceph/config" "github.com/rook/rook/pkg/operator/ceph/controller" ) @@ -47,7 +48,7 @@ func makeStorageClassDeviceSetPVCLabel(storageClassDeviceSetName, pvcStorageClas func (c *Cluster) getOSDLabels(osd OSDInfo, failureDomainValue string, portable bool) map[string]string { stringID := fmt.Sprintf("%d", osd.ID) - labels := controller.CephDaemonAppLabels(AppName, c.clusterInfo.Namespace, "osd", stringID, true) + labels := controller.CephDaemonAppLabels(AppName, c.clusterInfo.Namespace, config.OsdType, stringID, c.clusterInfo.NamespacedName().Name, "cephclusters.ceph.rook.io", true) labels[OsdIdLabelKey] = stringID labels[FailureDomainKey] = 
failureDomainValue labels[portableKey] = strconv.FormatBool(portable) diff --git a/pkg/operator/ceph/cluster/osd/update_test.go b/pkg/operator/ceph/cluster/osd/update_test.go index 3c7e952a684c7..9ca86324ef146 100644 --- a/pkg/operator/ceph/cluster/osd/update_test.go +++ b/pkg/operator/ceph/cluster/osd/update_test.go @@ -519,7 +519,7 @@ func Test_getOSDUpdateInfo(t *testing.T) { addTestDeployment(clientset, "non-rook-deployment", namespace, map[string]string{}) // mon.a in this namespace - l := controller.CephDaemonAppLabels("rook-ceph-mon", namespace, "mon", "a", true) + l := controller.CephDaemonAppLabels("rook-ceph-mon", namespace, "mon", "a", "rook-ceph-operator", "cephclusters.ceph.rook.io", true) addTestDeployment(clientset, "rook-ceph-mon-a", namespace, l) // osd.1 and 3 in another namespace (another Rook cluster) diff --git a/pkg/operator/ceph/cluster/rbd/spec.go b/pkg/operator/ceph/cluster/rbd/spec.go index 7f47c16e42c2e..9a410284a5de4 100644 --- a/pkg/operator/ceph/cluster/rbd/spec.go +++ b/pkg/operator/ceph/cluster/rbd/spec.go @@ -32,7 +32,7 @@ func (r *ReconcileCephRBDMirror) makeDeployment(daemonConfig *daemonConfig, rbdM podSpec := v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: daemonConfig.ResourceName, - Labels: controller.CephDaemonAppLabels(AppName, rbdMirror.Namespace, config.RbdMirrorType, daemonConfig.DaemonID, true), + Labels: controller.CephDaemonAppLabels(AppName, rbdMirror.Namespace, config.RbdMirrorType, daemonConfig.DaemonID, rbdMirror.Name, "cephrbdmirrors.ceph.rook.io", true), }, Spec: v1.PodSpec{ InitContainers: []v1.Container{ @@ -75,7 +75,7 @@ func (r *ReconcileCephRBDMirror) makeDeployment(daemonConfig *daemonConfig, rbdM Name: daemonConfig.ResourceName, Namespace: rbdMirror.Namespace, Annotations: rbdMirror.Spec.Annotations, - Labels: controller.CephDaemonAppLabels(AppName, rbdMirror.Namespace, config.RbdMirrorType, daemonConfig.DaemonID, true), + Labels: controller.CephDaemonAppLabels(AppName, rbdMirror.Namespace, 
config.RbdMirrorType, daemonConfig.DaemonID, rbdMirror.Name, "cephrbdmirrors.ceph.rook.io", true), }, Spec: apps.DeploymentSpec{ Selector: &metav1.LabelSelector{ diff --git a/pkg/operator/ceph/cluster/rbd/spec_test.go b/pkg/operator/ceph/cluster/rbd/spec_test.go index 12ae65eb3ec66..c64ebd6bca7c1 100644 --- a/pkg/operator/ceph/cluster/rbd/spec_test.go +++ b/pkg/operator/ceph/cluster/rbd/spec_test.go @@ -93,10 +93,10 @@ func TestPodSpec(t *testing.T) { // Deployment should have Ceph labels test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.RbdMirrorType, "a", AppName, "ns") + config.RbdMirrorType, "a", AppName, "ns", "a", "cephrbdmirrors.ceph.rook.io", "ceph-rbd-mirror") podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) podTemplate.RunFullSuite(config.RbdMirrorType, "a", AppName, "ns", "quay.io/ceph/ceph:myceph", "200", "100", "600", "300", /* resources */ - "my-priority-class") + "my-priority-class", "a", "cephrbdmirrors.ceph.rook.io", "ceph-rbd-mirror") } diff --git a/pkg/operator/ceph/controller/spec.go b/pkg/operator/ceph/controller/spec.go index 29e101170d06a..e583260590f70 100644 --- a/pkg/operator/ceph/controller/spec.go +++ b/pkg/operator/ceph/controller/spec.go @@ -388,15 +388,18 @@ func AppLabels(appName, namespace string) map[string]string { } // CephDaemonAppLabels returns pod labels common to all Rook-Ceph pods which may be useful for admins. -// App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc. 
+// App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc.
 // Daemon type is the Ceph daemon type: "mon", "mgr", "osd", "mds", "rgw"
 // Daemon ID is the ID portion of the Ceph daemon name: "a" for "mon.a"; "c" for "mds.c"
-func CephDaemonAppLabels(appName, namespace, daemonType, daemonID string, includeNewLabels bool) map[string]string {
+// ParentName is the resource metadata.name: "rook-ceph", "my-cluster", etc.
+// ResourceKind is the fully qualified (plural) resource type of the owning CR: e.g., "cephclusters.ceph.rook.io", "cephfilesystems.ceph.rook.io"
+func CephDaemonAppLabels(appName, namespace, daemonType, daemonID, parentName, resourceKind string, includeNewLabels bool) map[string]string {
 	labels := AppLabels(appName, namespace)
 	// New labels cannot be applied to match selectors during upgrade
 	if includeNewLabels {
 		labels[daemonTypeLabel] = daemonType
+		k8sutil.AddRecommendedLabels(labels, "ceph-"+daemonType, parentName, resourceKind, daemonID)
 	}
 	labels[DaemonIDLabel] = daemonID
 	// Also report the daemon id keyed by its daemon type: "mon: a", "mds: c", etc.
diff --git a/pkg/operator/ceph/file/mds/spec.go b/pkg/operator/ceph/file/mds/spec.go index aa9f17abb6c54..3a8117d590ddb 100644 --- a/pkg/operator/ceph/file/mds/spec.go +++ b/pkg/operator/ceph/file/mds/spec.go @@ -155,7 +155,7 @@ func (c *Cluster) makeMdsDaemonContainer(mdsConfig *mdsConfig) v1.Container { } func (c *Cluster) podLabels(mdsConfig *mdsConfig, includeNewLabels bool) map[string]string { - labels := controller.CephDaemonAppLabels(AppName, c.fs.Namespace, "mds", mdsConfig.DaemonID, includeNewLabels) + labels := controller.CephDaemonAppLabels(AppName, c.fs.Namespace, config.MdsType, mdsConfig.DaemonID, c.fs.Name, "cephfilesystems.ceph.rook.io", includeNewLabels) labels["rook_file_system"] = c.fs.Name return labels } diff --git a/pkg/operator/ceph/file/mds/spec_test.go b/pkg/operator/ceph/file/mds/spec_test.go index 5ea991e49eccc..47278d1b761c8 100644 --- a/pkg/operator/ceph/file/mds/spec_test.go +++ b/pkg/operator/ceph/file/mds/spec_test.go @@ -92,12 +92,12 @@ func TestPodSpecs(t *testing.T) { // Deployment should have Ceph labels test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.MdsType, "myfs-a", "rook-ceph-mds", "ns") + config.MdsType, "myfs-a", "rook-ceph-mds", "ns", "myfs", "cephfilesystems.ceph.rook.io", "ceph-mds") podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) podTemplate.RunFullSuite(config.MdsType, "myfs-a", "rook-ceph-mds", "ns", "quay.io/ceph/ceph:testversion", "500", "250", "4337", "2169", /* resources */ - "my-priority-class") + "my-priority-class", "myfs", "cephfilesystems.ceph.rook.io", "ceph-mds") // assert --public-addr is appended to args assert.Contains(t, d.Spec.Template.Spec.Containers[0].Args, diff --git a/pkg/operator/ceph/file/mirror/spec.go b/pkg/operator/ceph/file/mirror/spec.go index b3bcccd0ec189..20bd776c90cff 100644 --- a/pkg/operator/ceph/file/mirror/spec.go +++ b/pkg/operator/ceph/file/mirror/spec.go @@ -33,7 +33,7 @@ func (r *ReconcileFilesystemMirror) makeDeployment(daemonConfig 
*daemonConfig, f ObjectMeta: metav1.ObjectMeta{ Name: daemonConfig.ResourceName, Namespace: fsMirror.Namespace, - Labels: controller.CephDaemonAppLabels(AppName, fsMirror.Namespace, config.FilesystemMirrorType, userID, true), + Labels: controller.CephDaemonAppLabels(AppName, fsMirror.Namespace, config.FilesystemMirrorType, userID, fsMirror.Name, "cephfilesystemmirrors.ceph.rook.io", true), }, Spec: v1.PodSpec{ InitContainers: []v1.Container{ @@ -76,8 +76,7 @@ func (r *ReconcileFilesystemMirror) makeDeployment(daemonConfig *daemonConfig, f Name: daemonConfig.ResourceName, Namespace: fsMirror.Namespace, Annotations: fsMirror.Spec.Annotations, - Labels: controller.CephDaemonAppLabels(AppName, fsMirror.Namespace, config.FilesystemMirrorType, userID, true), - }, + Labels: controller.CephDaemonAppLabels(AppName, fsMirror.Namespace, config.FilesystemMirrorType, userID, fsMirror.Name, "cephfilesystemmirrors.ceph.rook.io", true)}, Spec: apps.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: podSpec.Labels, diff --git a/pkg/operator/ceph/file/mirror/spec_test.go b/pkg/operator/ceph/file/mirror/spec_test.go index 89d55325a6be2..d41d5aff0b0f0 100644 --- a/pkg/operator/ceph/file/mirror/spec_test.go +++ b/pkg/operator/ceph/file/mirror/spec_test.go @@ -90,10 +90,10 @@ func TestPodSpec(t *testing.T) { // Deployment should have Ceph labels test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.FilesystemMirrorType, userID, AppName, "ns") + config.FilesystemMirrorType, userID, AppName, "ns", "fs-mirror", "cephfilesystemmirrors.ceph.rook.io", "ceph-fs-mirror") podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) podTemplate.RunFullSuite(config.FilesystemMirrorType, userID, AppName, "ns", "quay.io/ceph/ceph:v16", "200", "100", "600", "300", /* resources */ - "my-priority-class") + "my-priority-class", "fs-mirror", "cephfilesystemmirrors.ceph.rook.io", "ceph-fs-mirror") } diff --git a/pkg/operator/ceph/nfs/spec.go 
b/pkg/operator/ceph/nfs/spec.go index e265e69683224..84e178c5a7f0f 100644 --- a/pkg/operator/ceph/nfs/spec.go +++ b/pkg/operator/ceph/nfs/spec.go @@ -248,7 +248,7 @@ func (r *ReconcileCephNFS) dbusContainer(nfs *cephv1.CephNFS) v1.Container { } func getLabels(n *cephv1.CephNFS, name string, includeNewLabels bool) map[string]string { - labels := controller.CephDaemonAppLabels(AppName, n.Namespace, "nfs", name, includeNewLabels) + labels := controller.CephDaemonAppLabels(AppName, n.Namespace, "nfs", n.Name+"-"+name, n.Name, "cephnfses.ceph.rook.io", includeNewLabels) labels["ceph_nfs"] = n.Name labels["instance"] = name return labels diff --git a/pkg/operator/ceph/object/spec.go b/pkg/operator/ceph/object/spec.go index ee979ed691605..256a27b74a204 100644 --- a/pkg/operator/ceph/object/spec.go +++ b/pkg/operator/ceph/object/spec.go @@ -27,6 +27,7 @@ import ( "github.com/pkg/errors" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/daemon/ceph/osd/kms" + "github.com/rook/rook/pkg/operator/ceph/config" cephconfig "github.com/rook/rook/pkg/operator/ceph/config" "github.com/rook/rook/pkg/operator/ceph/controller" cephver "github.com/rook/rook/pkg/operator/ceph/version" @@ -68,11 +69,11 @@ func (c *clusterConfig) createDeployment(rgwConfig *rgwConfig) (*apps.Deployment ObjectMeta: metav1.ObjectMeta{ Name: rgwConfig.ResourceName, Namespace: c.store.Namespace, - Labels: getLabels(c.store.Name, c.store.Namespace, true), + Labels: getLabels(rgwConfig.DaemonID, c.store.Name, c.store.Namespace, true), }, Spec: apps.DeploymentSpec{ Selector: &metav1.LabelSelector{ - MatchLabels: getLabels(c.store.Name, c.store.Namespace, false), + MatchLabels: getLabels(rgwConfig.DaemonID, c.store.Name, c.store.Namespace, false), }, Template: pod, Replicas: &replicas, @@ -175,13 +176,13 @@ func (c *clusterConfig) makeRGWPodSpec(rgwConfig *rgwConfig) (v1.PodTemplateSpec c.store.Spec.Gateway.Placement.ApplyToPodSpec(&podSpec) // If host networking is not enabled, 
preferred pod anti-affinity is added to the rgw daemons - labels := getLabels(c.store.Name, c.store.Namespace, false) + labels := getLabels(rgwConfig.DaemonID, c.store.Name, c.store.Namespace, false) k8sutil.SetNodeAntiAffinityForPod(&podSpec, c.clusterSpec.Network.IsHost(), v1.LabelHostname, labels, nil) podTemplateSpec := v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: rgwConfig.ResourceName, - Labels: getLabels(c.store.Name, c.store.Namespace, true), + Labels: getLabels(rgwConfig.DaemonID, c.store.Name, c.store.Namespace, true), }, Spec: podSpec, } @@ -434,7 +435,7 @@ func (c *clusterConfig) generateService(cephObjectStore *cephv1.CephObjectStore) ObjectMeta: metav1.ObjectMeta{ Name: instanceName(cephObjectStore.Name), Namespace: cephObjectStore.Namespace, - Labels: getLabels(cephObjectStore.Name, cephObjectStore.Namespace, true), + Labels: getLabels(cephObjectStore.Name, cephObjectStore.Name, cephObjectStore.Namespace, true), }, } @@ -453,7 +454,7 @@ func (c *clusterConfig) generateService(cephObjectStore *cephv1.CephObjectStore) } else { // If the cluster is not external we add the Selector svc.Spec = v1.ServiceSpec{ - Selector: getLabels(cephObjectStore.Name, cephObjectStore.Namespace, false), + Selector: getLabels(cephObjectStore.Name, cephObjectStore.Name, cephObjectStore.Namespace, false), } } @@ -464,7 +465,7 @@ func (c *clusterConfig) generateService(cephObjectStore *cephv1.CephObjectStore) } func (c *clusterConfig) generateEndpoint(cephObjectStore *cephv1.CephObjectStore) *v1.Endpoints { - labels := getLabels(cephObjectStore.Name, cephObjectStore.Namespace, true) + labels := getLabels(cephObjectStore.Name, cephObjectStore.Name, cephObjectStore.Namespace, true) endpoints := &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ @@ -597,8 +598,8 @@ func addPortToEndpoint(endpoints *v1.Endpoints, name string, port int32) { ) } -func getLabels(name, namespace string, includeNewLabels bool) map[string]string { - labels := 
controller.CephDaemonAppLabels(AppName, namespace, "rgw", name, includeNewLabels) +func getLabels(daemonID, name, namespace string, includeNewLabels bool) map[string]string { + labels := controller.CephDaemonAppLabels(AppName, namespace, config.RgwType, daemonID, name, "cephobjectstores.ceph.rook.io", includeNewLabels) labels["rook_object_store"] = name return labels } diff --git a/pkg/operator/ceph/object/spec_test.go b/pkg/operator/ceph/object/spec_test.go index 9208b7ed6bf0a..642b5226af012 100644 --- a/pkg/operator/ceph/object/spec_test.go +++ b/pkg/operator/ceph/object/spec_test.go @@ -71,6 +71,7 @@ func TestPodSpecs(t *testing.T) { resourceName := fmt.Sprintf("%s-%s", AppName, c.store.Name) rgwConfig := &rgwConfig{ ResourceName: resourceName, + DaemonID: "default", } s, err := c.makeRGWPodSpec(rgwConfig) @@ -81,13 +82,13 @@ func TestPodSpecs(t *testing.T) { 1, len(s.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution)) assert.Equal(t, - getLabels(c.store.Name, c.store.Namespace, false), + getLabels("default", c.store.Name, c.store.Namespace, false), s.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0].LabelSelector.MatchLabels) podTemplate := cephtest.NewPodTemplateSpecTester(t, &s) podTemplate.RunFullSuite(cephconfig.RgwType, "default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", "200", "100", "1337", "500", /* resources */ - "my-priority-class") + "my-priority-class", "default", "cephobjectstores.ceph.rook.io", "ceph-rgw") } func TestSSLPodSpec(t *testing.T) { @@ -130,6 +131,7 @@ func TestSSLPodSpec(t *testing.T) { resourceName := fmt.Sprintf("%s-%s", AppName, c.store.Name) rgwConfig := &rgwConfig{ ResourceName: resourceName, + DaemonID: "default", } _, err := c.makeRGWPodSpec(rgwConfig) // No TLS certs specified, will return error @@ -158,7 +160,7 @@ func TestSSLPodSpec(t *testing.T) { podTemplate := cephtest.NewPodTemplateSpecTester(t, &s) podTemplate.RunFullSuite(cephconfig.RgwType, 
"default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", "200", "100", "1337", "500", /* resources */ - "my-priority-class") + "my-priority-class", "default", "cephobjectstores.ceph.rook.io", "ceph-rgw") // TLS Secret c.store.Spec.Gateway.SSLCertificateRef = "tlscert" rgwtlssecret = &v1.Secret{ @@ -182,7 +184,7 @@ func TestSSLPodSpec(t *testing.T) { podTemplate = cephtest.NewPodTemplateSpecTester(t, &s) podTemplate.RunFullSuite(cephconfig.RgwType, "default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", "200", "100", "1337", "500", /* resources */ - "my-priority-class") + "my-priority-class", "default", "cephobjectstores.ceph.rook.io", "ceph-rgw") // Using service serving cert c.store.Spec.Gateway.SSLCertificateRef = "" c.store.Spec.Gateway.Service = &(cephv1.RGWServiceSpec{Annotations: cephv1.Annotations{cephv1.ServiceServingCertKey: "rgw-cert"}}) @@ -212,7 +214,7 @@ func TestSSLPodSpec(t *testing.T) { podTemplate = cephtest.NewPodTemplateSpecTester(t, &s) podTemplate.RunFullSuite(cephconfig.RgwType, "default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", "200", "100", "1337", "500", /* resources */ - "my-priority-class") + "my-priority-class", "default", "cephobjectstores.ceph.rook.io", "ceph-rgw") assert.True(t, s.Spec.HostNetwork) assert.Equal(t, v1.DNSClusterFirstWithHostNet, s.Spec.DNSPolicy) diff --git a/pkg/operator/ceph/test/podtemplatespec.go b/pkg/operator/ceph/test/podtemplatespec.go index e3f97f81586e6..f39ceed57b9fa 100644 --- a/pkg/operator/ceph/test/podtemplatespec.go +++ b/pkg/operator/ceph/test/podtemplatespec.go @@ -37,10 +37,10 @@ func NewPodTemplateSpecTester(t *testing.T, template *v1.PodTemplateSpec) *PodTe // AssertLabelsContainCephRequirements asserts that the PodTemplateSpec under test contains labels // which all Ceph pods should have. 
func (pt *PodTemplateSpecTester) AssertLabelsContainCephRequirements( - daemonType, daemonID, appName, namespace string, + daemonType, daemonID, appName, namespace, parentName, resourceKind, appBinaryName string, ) { AssertLabelsContainCephRequirements(pt.t, pt.template.ObjectMeta.Labels, - daemonType, daemonID, appName, namespace) + daemonType, daemonID, appName, namespace, parentName, resourceKind, appBinaryName) } // RunFullSuite runs all assertion tests for the PodTemplateSpec under test and its sub-resources. @@ -48,8 +48,8 @@ func (pt *PodTemplateSpecTester) RunFullSuite( daemonType, daemonID, appName, namespace, cephImage, cpuResourceLimit, cpuResourceRequest, memoryResourceLimit, memoryResourceRequest string, - priorityClassName string, + priorityClassName, parentName, resourceKind, appBinaryName string, ) { - pt.AssertLabelsContainCephRequirements(daemonType, daemonID, appName, namespace) + pt.AssertLabelsContainCephRequirements(daemonType, daemonID, appName, namespace, parentName, resourceKind, appBinaryName) pt.Spec().RunFullSuite(daemonType, daemonID, cephImage, cpuResourceLimit, cpuResourceRequest, memoryResourceLimit, memoryResourceRequest, priorityClassName) } diff --git a/pkg/operator/ceph/test/spec.go b/pkg/operator/ceph/test/spec.go index f07eed51c929f..417c25afc7784 100644 --- a/pkg/operator/ceph/test/spec.go +++ b/pkg/operator/ceph/test/spec.go @@ -18,6 +18,7 @@ package test import ( "fmt" + "os" "strings" "testing" @@ -74,7 +75,7 @@ func VerifyPodLabels(appName, namespace, daemonType, daemonID string, labels map // DaemonSets, etc. 
func AssertLabelsContainCephRequirements( t *testing.T, labels map[string]string, - daemonType, daemonID, appName, namespace string, + daemonType, daemonID, appName, namespace, parentName, resourceKind, appBinaryName string, ) { optest.AssertLabelsContainRookRequirements(t, labels, appName) @@ -83,8 +84,15 @@ func AssertLabelsContainCephRequirements( resourceLabels = append(resourceLabels, fmt.Sprintf("%s=%s", k, v)) } expectedLabels := []string{ + "app.kubernetes.io/created-by=rook-ceph-operator", + "app.kubernetes.io/component=" + resourceKind, + "app.kubernetes.io/instance=" + daemonID, + "app.kubernetes.io/name=" + appBinaryName, + "app.kubernetes.io/managed-by=rook-ceph-operator", + "app.kubernetes.io/part-of=" + parentName, "ceph_daemon_id=" + daemonID, string(daemonType) + "=" + daemonID, + "rook.io/operator-namespace=" + os.Getenv("POD_NAMESPACE"), "rook_cluster" + "=" + namespace, } assert.Subset(t, resourceLabels, expectedLabels, diff --git a/pkg/operator/discover/discover.go b/pkg/operator/discover/discover.go index c77e0ce7ed8b7..028e15a252829 100644 --- a/pkg/operator/discover/discover.go +++ b/pkg/operator/discover/discover.go @@ -88,10 +88,8 @@ func (d *Discover) createDiscoverDaemonSet(ctx context.Context, namespace, disco ds := &apps.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ - Name: discoverDaemonsetName, - Labels: map[string]string{ - "app": discoverDaemonsetName, - }, + Name: discoverDaemonsetName, + Labels: getLabels(), }, Spec: apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{ @@ -104,9 +102,7 @@ func (d *Discover) createDiscoverDaemonSet(ctx context.Context, namespace, disco }, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app": discoverDaemonsetName, - }, + Labels: getLabels(), }, Spec: v1.PodSpec{ ServiceAccountName: securityAccount, @@ -241,6 +237,13 @@ func (d *Discover) createDiscoverDaemonSet(ctx context.Context, namespace, disco } +func getLabels() map[string]string { + labels := 
make(map[string]string) + k8sutil.AddRecommendedLabels(labels, "rook-discover", "rook-ceph-operator", "rook-discover", "rook-discover") + labels["app"] = discoverDaemonsetName + return labels +} + func getEnvVar(varName string, defaultValue string) string { envValue := os.Getenv(varName) if envValue != "" { return envValue } return defaultValue } diff --git a/pkg/operator/k8sutil/labels.go b/pkg/operator/k8sutil/labels.go index b41537fb9989c..c17c6980a6c5b 100644 --- a/pkg/operator/k8sutil/labels.go +++ b/pkg/operator/k8sutil/labels.go @@ -17,6 +17,7 @@ limitations under the License. package k8sutil import ( + "os" "strings" ) @@ -45,3 +46,15 @@ func ParseStringToLabels(in string) map[string]string { return labels } + +// AddRecommendedLabels adds the Kubernetes recommended labels (app.kubernetes.io/*) to resources created by Rook. +// The labels added include name, instance, component, part-of, managed-by, and created-by. +func AddRecommendedLabels(labels map[string]string, appName, parentName, resourceKind, resourceInstance string) { + labels["app.kubernetes.io/name"] = appName + labels["app.kubernetes.io/instance"] = resourceInstance + labels["app.kubernetes.io/component"] = resourceKind + labels["app.kubernetes.io/part-of"] = parentName + labels["app.kubernetes.io/managed-by"] = "rook-ceph-operator" + labels["app.kubernetes.io/created-by"] = "rook-ceph-operator" + labels["rook.io/operator-namespace"] = os.Getenv(PodNamespaceEnvVar) +}