From f8a7c5bcb41255e525722114f30721502d893d8e Mon Sep 17 00:00:00 2001 From: parth-gr Date: Mon, 6 Dec 2021 12:52:11 +0530 Subject: [PATCH] core: create rook resources with k8s recommended labels Adding Recommended Labels on the resources created by rook and using Recommended Labels in the helm chart, for better visuals and management of k8s object Closes: https://github.com/rook/rook/issues/8400 Signed-off-by: parth-gr (cherry picked from commit 0a86d26b2e15cd233155ff4379ce8cb58fd46a19) --- build/rbac/keep-rbac-yaml.sh | 4 +- build/rbac/rbac.yaml | 16 +++++++ .../charts/library/templates/_cluster-psp.tpl | 1 + .../templates/_cluster-serviceaccount.tpl | 3 ++ .../library/templates/_recommended-labels.tpl | 9 ++++ .../rook-ceph/templates/clusterrole.yaml | 5 +++ .../templates/clusterrolebinding.yaml | 3 +- .../rook-ceph/templates/deployment.yaml | 4 +- deploy/charts/rook-ceph/templates/psp.yaml | 4 +- deploy/charts/rook-ceph/templates/role.yaml | 1 + .../rook-ceph/templates/rolebinding.yaml | 1 + .../rook-ceph/templates/serviceaccount.yaml | 2 +- deploy/examples/common.yaml | 38 ++++++++++++++++ deploy/examples/operator-openshift.yaml | 4 ++ deploy/examples/operator.yaml | 4 ++ .../ceph/interacting-with-rook-resources.md | 43 +++++++++++++++++++ pkg/operator/ceph/cluster/mgr/spec.go | 12 +++--- pkg/operator/ceph/cluster/mgr/spec_test.go | 4 +- pkg/operator/ceph/cluster/mon/spec.go | 2 +- pkg/operator/ceph/cluster/mon/spec_test.go | 4 +- pkg/operator/ceph/cluster/osd/labels.go | 3 +- pkg/operator/ceph/cluster/osd/update_test.go | 2 +- pkg/operator/ceph/cluster/rbd/spec.go | 4 +- pkg/operator/ceph/cluster/rbd/spec_test.go | 4 +- pkg/operator/ceph/controller/spec.go | 7 ++- pkg/operator/ceph/file/mds/spec.go | 2 +- pkg/operator/ceph/file/mds/spec_test.go | 4 +- pkg/operator/ceph/file/mirror/spec.go | 5 +-- pkg/operator/ceph/file/mirror/spec_test.go | 4 +- pkg/operator/ceph/nfs/spec.go | 2 +- pkg/operator/ceph/object/spec.go | 3 +- pkg/operator/ceph/object/spec_test.go 
| 10 +++-- pkg/operator/ceph/test/podtemplatespec.go | 8 ++-- pkg/operator/ceph/test/spec.go | 10 ++++- pkg/operator/discover/discover.go | 17 +++++--- pkg/operator/k8sutil/labels.go | 13 ++++++ 36 files changed, 210 insertions(+), 52 deletions(-) create mode 100644 deploy/charts/library/templates/_recommended-labels.tpl create mode 100644 design/ceph/interacting-with-rook-resources.md diff --git a/build/rbac/keep-rbac-yaml.sh b/build/rbac/keep-rbac-yaml.sh index 31ddb84d6ae8..4f4c530ee6f3 100755 --- a/build/rbac/keep-rbac-yaml.sh +++ b/build/rbac/keep-rbac-yaml.sh @@ -27,7 +27,9 @@ $YQ eval ' select(.kind == "Role"), select(.kind == "RoleBinding") ' - | # select all RBAC resource Kinds -$YQ eval 'del(.metadata.labels.chart)' - | # remove the 'chart' label that only applies to Helm-managed resources +$YQ eval 'del(.metadata.labels."helm.sh/chart")' - | # remove the 'helm.sh/chart' label that only applies to Helm-managed resources +$YQ eval 'del(.metadata.labels."app.kubernetes.io/managed-by")' - | # remove the 'labels.app.kubernetes.io/managed-by' label that only applies to Helm-managed resources +$YQ eval 'del(.metadata.labels."app.kubernetes.io/created-by")' - | # remove the 'app.kubernetes.io/created-by' label that only applies to Helm-managed resources sed '/^$/d' | # remove empty lines caused by yq's display of header/footer comments sed '/^# Source: /d' | # helm adds '# Source: ' comments atop of each yaml doc. 
Strip these $YQ eval --split-exp '.kind + " " + .metadata.name + " "' - # split into files by .yaml diff --git a/build/rbac/rbac.yaml b/build/rbac/rbac.yaml index 2095060f2a66..ae76b78c39e5 100644 --- a/build/rbac/rbac.yaml +++ b/build/rbac/rbac.yaml @@ -75,6 +75,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - policy @@ -186,6 +187,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - "" @@ -218,6 +220,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - "" @@ -343,6 +346,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - "" @@ -399,6 +403,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: [""] resources: ["secrets", "configmaps"] @@ -464,6 +469,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator rules: # Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint. # However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...] 
@@ -536,6 +542,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -594,6 +601,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -610,6 +618,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -892,6 +901,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - "" @@ -1019,6 +1029,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -1125,6 +1136,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -1143,6 +1155,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator --- # Service account for Ceph mgrs apiVersion: v1 @@ -1153,6 +1166,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator --- # Service account for Ceph OSDs apiVersion: v1 @@ -1164,6 +1178,7 @@ metadata: operator: rook storage-backend: ceph i-am-a-new-label: delete-me + app.kubernetes.io/part-of: rook-ceph-operator --- # Service account for job that purges OSDs from a Rook-Ceph cluster apiVersion: v1 @@ -1181,6 +1196,7 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/part-of: rook-ceph-operator --- # Service account for the CephFS CSI driver apiVersion: v1 diff --git a/deploy/charts/library/templates/_cluster-psp.tpl b/deploy/charts/library/templates/_cluster-psp.tpl index 072c9654b773..1918f7774e86 100644 
--- a/deploy/charts/library/templates/_cluster-psp.tpl +++ b/deploy/charts/library/templates/_cluster-psp.tpl @@ -10,6 +10,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/deploy/charts/library/templates/_cluster-serviceaccount.tpl b/deploy/charts/library/templates/_cluster-serviceaccount.tpl index 804556b70357..8ca0ec806a1f 100644 --- a/deploy/charts/library/templates/_cluster-serviceaccount.tpl +++ b/deploy/charts/library/templates/_cluster-serviceaccount.tpl @@ -12,6 +12,7 @@ metadata: operator: rook storage-backend: ceph i-am-a-new-label: delete-me + {{- include "library.rook-ceph.labels" . | nindent 4 }} {{ include "library.imagePullSecrets" . }} --- # Service account for Ceph mgrs @@ -23,6 +24,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} {{ include "library.imagePullSecrets" . }} --- # Service account for the job that reports the Ceph version in an image @@ -34,6 +36,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} {{ include "library.imagePullSecrets" . 
}} --- # Service account for job that purges OSDs from a Rook-Ceph cluster diff --git a/deploy/charts/library/templates/_recommended-labels.tpl b/deploy/charts/library/templates/_recommended-labels.tpl new file mode 100644 index 000000000000..906755c67d44 --- /dev/null +++ b/deploy/charts/library/templates/_recommended-labels.tpl @@ -0,0 +1,9 @@ +{{/* +Common labels +*/}} +{{- define "library.rook-ceph.labels" -}} +app.kubernetes.io/part-of: rook-ceph-operator +app.kubernetes.io/managed-by: helm +app.kubernetes.io/created-by: helm +helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- end -}} diff --git a/deploy/charts/rook-ceph/templates/clusterrole.yaml b/deploy/charts/rook-ceph/templates/clusterrole.yaml index 83ba8bc9f86c..30312a9228fc 100644 --- a/deploy/charts/rook-ceph/templates/clusterrole.yaml +++ b/deploy/charts/rook-ceph/templates/clusterrole.yaml @@ -6,6 +6,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: # Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint. # However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...] @@ -26,6 +27,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: - apiGroups: - "" @@ -58,6 +60,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: - apiGroups: - "" @@ -183,6 +186,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: - apiGroups: - "" @@ -239,6 +243,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . 
| nindent 4 }} rules: - apiGroups: [""] resources: ["secrets", "configmaps"] diff --git a/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml b/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml index 732ff20e15d7..1e484b25bada 100644 --- a/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml +++ b/deploy/charts/rook-ceph/templates/clusterrolebinding.yaml @@ -6,6 +6,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -23,7 +24,7 @@ metadata: labels: operator: rook storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- include "library.rook-ceph.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/deploy/charts/rook-ceph/templates/deployment.yaml b/deploy/charts/rook-ceph/templates/deployment.yaml index 879cb66ed4ae..98f0427007c7 100644 --- a/deploy/charts/rook-ceph/templates/deployment.yaml +++ b/deploy/charts/rook-ceph/templates/deployment.yaml @@ -5,7 +5,7 @@ metadata: labels: operator: rook storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- include "library.rook-ceph.labels" . 
| nindent 4 }} spec: replicas: 1 selector: @@ -15,7 +15,7 @@ spec: metadata: labels: app: rook-ceph-operator - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" {{- if .Values.annotations }} annotations: {{ toYaml .Values.annotations | indent 8 }} diff --git a/deploy/charts/rook-ceph/templates/psp.yaml b/deploy/charts/rook-ceph/templates/psp.yaml index 1b46af488854..e609c5f53b53 100644 --- a/deploy/charts/rook-ceph/templates/psp.yaml +++ b/deploy/charts/rook-ceph/templates/psp.yaml @@ -84,7 +84,7 @@ metadata: labels: operator: rook storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: - apiGroups: - policy @@ -102,7 +102,7 @@ metadata: labels: operator: rook storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- include "library.rook-ceph.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/deploy/charts/rook-ceph/templates/role.yaml b/deploy/charts/rook-ceph/templates/role.yaml index e6185c43d907..74cefb7cc44b 100644 --- a/deploy/charts/rook-ceph/templates/role.yaml +++ b/deploy/charts/rook-ceph/templates/role.yaml @@ -8,6 +8,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . | nindent 4 }} rules: - apiGroups: - "" diff --git a/deploy/charts/rook-ceph/templates/rolebinding.yaml b/deploy/charts/rook-ceph/templates/rolebinding.yaml index 3b5eaa1aa9bc..4025df5b6576 100644 --- a/deploy/charts/rook-ceph/templates/rolebinding.yaml +++ b/deploy/charts/rook-ceph/templates/rolebinding.yaml @@ -8,6 +8,7 @@ metadata: labels: operator: rook storage-backend: ceph + {{- include "library.rook-ceph.labels" . 
| nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/deploy/charts/rook-ceph/templates/serviceaccount.yaml b/deploy/charts/rook-ceph/templates/serviceaccount.yaml index c6562530b941..aceeb2a0a6de 100644 --- a/deploy/charts/rook-ceph/templates/serviceaccount.yaml +++ b/deploy/charts/rook-ceph/templates/serviceaccount.yaml @@ -7,7 +7,7 @@ metadata: labels: operator: rook storage-backend: ceph - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + {{- include "library.rook-ceph.labels" . | nindent 4 }} {{ template "library.imagePullSecrets" . }} --- # Service account for the CephFS CSI driver diff --git a/deploy/examples/common.yaml b/deploy/examples/common.yaml index 6919d3168f3b..bcc3fc57beba 100644 --- a/deploy/examples/common.yaml +++ b/deploy/examples/common.yaml @@ -84,6 +84,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: "psp:rook" + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - policy @@ -98,6 +105,13 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: rbd-csi-nodeplugin + labels: + operator: rook + storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: [""] resources: ["secrets"] @@ -195,6 +209,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - "" @@ -227,6 +245,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: 
ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - "" @@ -352,6 +374,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: - "" @@ -408,6 +434,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: - apiGroups: [""] resources: ["secrets", "configmaps"] @@ -473,6 +503,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator rules: # Most resources are represented by a string representation of their name, such as "pods", just as it appears in the URL for the relevant API endpoint. # However, some Kubernetes APIs involve a "subresource", such as the logs for a pod. [...] 
@@ -545,6 +579,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: ceph-csi + app.kubernetes.io/part-of: rook-ceph-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml index 8404d7826103..081647053e09 100644 --- a/deploy/examples/operator-openshift.yaml +++ b/deploy/examples/operator-openshift.yaml @@ -428,6 +428,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: rook-ceph-operator + app.kubernetes.io/part-of: rook-ceph-operator spec: selector: matchLabels: diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml index 9002d6bd340e..dce9b25773f1 100644 --- a/deploy/examples/operator.yaml +++ b/deploy/examples/operator.yaml @@ -345,6 +345,10 @@ metadata: labels: operator: rook storage-backend: ceph + app.kubernetes.io/name: rook-ceph + app.kubernetes.io/instance: rook-ceph + app.kubernetes.io/component: rook-ceph-operator + app.kubernetes.io/part-of: rook-ceph-operator spec: selector: matchLabels: diff --git a/design/ceph/interacting-with-rook-resources.md b/design/ceph/interacting-with-rook-resources.md new file mode 100644 index 000000000000..016eb65de50e --- /dev/null +++ b/design/ceph/interacting-with-rook-resources.md @@ -0,0 +1,43 @@ +# Labels added to Rook-Ceph resources + +[Recommended Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/) are a common set of labels that allows tools to work interoperably, describing objects in a common manner that all tools can understand. + +## Labels added to all Resources created by Rook + +* `app.kubernetes.io/name`: Is the name of the binary running in a container(combination of "ceph-"+daemonType). 
+ +* `app.kubernetes.io/instance`: A unique name identifying the instance of an application. Due to the nature of how resources are named in Rook, this is guaranteed to be unique per CephCluster namespace but not unique within the entire Kubernetes cluster. + +* `app.kubernetes.io/component`: This is populated with the Kind of the resource controlling this application. For example, `cephclusters.ceph.rook.io` or `cephfilesystems.ceph.rook.io`. + +* `app.kubernetes.io/part-of`: This is populated with the Name of the resource controlling this application. + +* `app.kubernetes.io/managed-by`: `rook-ceph-operator` is the tool being used to manage the operation of an application + +* `app.kubernetes.io/created-by`: `rook-ceph-operator` is the controller/user who created this resource + +* `rook.io/operator-namespace`: The namespace in which rook-ceph operator is running. + +An Example of Recommended Labels on Ceph mon with ID=a will look like: +``` + app.kubernetes.io/name : "ceph-mon" + app.kubernetes.io/instance : "a" + app.kubernetes.io/component : "cephclusters.ceph.rook.io" + app.kubernetes.io/part-of : "rook-ceph" + app.kubernetes.io/managed-by : "rook-ceph-operator" + app.kubernetes.io/created-by : "rook-ceph-operator" + rook.io/operator-namespace : "rook-ceph" +``` + +Another example on CephFilesystem with ID=a: +``` + app.kubernetes.io/name : "ceph-mds" + app.kubernetes.io/instance : "myfs-a" + app.kubernetes.io/component : "cephfilesystems.ceph.rook.io" + app.kubernetes.io/part-of : "myfs" + app.kubernetes.io/managed-by : "rook-ceph-operator" + app.kubernetes.io/created-by : "rook-ceph-operator" + rook.io/operator-namespace : "rook-ceph" +``` + +**NOTE** : A totally unique string for an application can be built up from (a) app.kubernetes.io/component, (b) app.kubernetes.io/part-of, (c) the resource's namespace, (d) app.kubernetes.io/name, and (e) app.kubernetes.io/instance fields. 
For the example above, we could join those fields with underscore connectors like this: cephclusters.ceph.rook.io_rook-ceph_rook-ceph_ceph-mon_a. Note that this full spec can easily exceed the 64-character limit imposed on Kubernetes labels. \ No newline at end of file diff --git a/pkg/operator/ceph/cluster/mgr/spec.go b/pkg/operator/ceph/cluster/mgr/spec.go index 439ccc241867..9896d25b5239 100644 --- a/pkg/operator/ceph/cluster/mgr/spec.go +++ b/pkg/operator/ceph/cluster/mgr/spec.go @@ -51,7 +51,7 @@ func (c *Cluster) makeDeployment(mgrConfig *mgrConfig) (*apps.Deployment, error) podSpec := v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: mgrConfig.ResourceName, - Labels: c.getPodLabels(mgrConfig.DaemonID, true), + Labels: c.getPodLabels(mgrConfig, true), }, Spec: v1.PodSpec{ InitContainers: []v1.Container{ @@ -111,11 +111,11 @@ func (c *Cluster) makeDeployment(mgrConfig *mgrConfig) (*apps.Deployment, error) ObjectMeta: metav1.ObjectMeta{ Name: mgrConfig.ResourceName, Namespace: c.clusterInfo.Namespace, - Labels: c.getPodLabels(mgrConfig.DaemonID, true), + Labels: c.getPodLabels(mgrConfig, true), }, Spec: apps.DeploymentSpec{ Selector: &metav1.LabelSelector{ - MatchLabels: c.getPodLabels(mgrConfig.DaemonID, false), + MatchLabels: c.getPodLabels(mgrConfig, false), }, Template: podSpec, Replicas: &replicas, @@ -318,10 +318,10 @@ func (c *Cluster) makeDashboardService(name, activeDaemon string) (*v1.Service, return svc, nil } -func (c *Cluster) getPodLabels(daemonName string, includeNewLabels bool) map[string]string { - labels := controller.CephDaemonAppLabels(AppName, c.clusterInfo.Namespace, "mgr", daemonName, includeNewLabels) +func (c *Cluster) getPodLabels(mgrConfig *mgrConfig, includeNewLabels bool) map[string]string { + labels := controller.CephDaemonAppLabels(AppName, c.clusterInfo.Namespace, config.MgrType, mgrConfig.DaemonID, c.clusterInfo.NamespacedName().Name, "cephclusters.ceph.rook.io", includeNewLabels) // leave "instance" key for legacy usage 
- labels["instance"] = daemonName + labels["instance"] = mgrConfig.DaemonID return labels } diff --git a/pkg/operator/ceph/cluster/mgr/spec_test.go b/pkg/operator/ceph/cluster/mgr/spec_test.go index 86ef477c0abd..ab2c2e1e43a8 100644 --- a/pkg/operator/ceph/cluster/mgr/spec_test.go +++ b/pkg/operator/ceph/cluster/mgr/spec_test.go @@ -68,7 +68,7 @@ func TestPodSpec(t *testing.T) { // Deployment should have Ceph labels test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.MgrType, "a", AppName, "ns") + config.MgrType, "a", AppName, "ns", "test", "cephclusters.ceph.rook.io", "ceph-mgr") podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) podTemplate.Spec().Containers().RequireAdditionalEnvVars( @@ -76,7 +76,7 @@ func TestPodSpec(t *testing.T) { "ROOK_CEPH_CLUSTER_CRD_NAME") podTemplate.RunFullSuite(config.MgrType, "a", AppName, "ns", "quay.io/ceph/ceph:myceph", "200", "100", "500", "250", /* resources */ - "my-priority-class") + "my-priority-class", "test", "cephclusters.ceph.rook.io", "ceph-mgr") assert.Equal(t, 2, len(d.Spec.Template.Annotations)) assert.Equal(t, 1, len(d.Spec.Template.Spec.Containers)) assert.Equal(t, 5, len(d.Spec.Template.Spec.Containers[0].VolumeMounts)) diff --git a/pkg/operator/ceph/cluster/mon/spec.go b/pkg/operator/ceph/cluster/mon/spec.go index 6316560207d7..657584197b5b 100644 --- a/pkg/operator/ceph/cluster/mon/spec.go +++ b/pkg/operator/ceph/cluster/mon/spec.go @@ -43,7 +43,7 @@ const ( func (c *Cluster) getLabels(monConfig *monConfig, canary, includeNewLabels bool) map[string]string { // Mons have a service for each mon, so the additional pod data is relevant for its services // Use pod labels to keep "mon: id" for legacy - labels := controller.CephDaemonAppLabels(AppName, c.Namespace, "mon", monConfig.DaemonName, includeNewLabels) + labels := controller.CephDaemonAppLabels(AppName, c.Namespace, config.MonType, monConfig.DaemonName, c.ClusterInfo.NamespacedName().Name, "cephclusters.ceph.rook.io", 
includeNewLabels) // Add "mon_cluster: " for legacy labels[monClusterAttr] = c.Namespace if canary { diff --git a/pkg/operator/ceph/cluster/mon/spec_test.go b/pkg/operator/ceph/cluster/mon/spec_test.go index 263c2a57491d..2b213e2be6f8 100644 --- a/pkg/operator/ceph/cluster/mon/spec_test.go +++ b/pkg/operator/ceph/cluster/mon/spec_test.go @@ -82,12 +82,12 @@ func testPodSpec(t *testing.T, monID string, pvc bool) { // Deployment should have Ceph labels test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.MonType, monID, AppName, "ns") + config.MonType, monID, AppName, "ns", "default", "cephclusters.ceph.rook.io", "ceph-mon") podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) podTemplate.RunFullSuite(config.MonType, monID, AppName, "ns", "quay.io/ceph/ceph:myceph", "200", "100", "1337", "500", /* resources */ - "my-priority-class") + "my-priority-class", "default", "cephclusters.ceph.rook.io", "ceph-mon") } func TestDeploymentPVCSpec(t *testing.T) { diff --git a/pkg/operator/ceph/cluster/osd/labels.go b/pkg/operator/ceph/cluster/osd/labels.go index 6cdde11043b5..49c87e0cab2d 100644 --- a/pkg/operator/ceph/cluster/osd/labels.go +++ b/pkg/operator/ceph/cluster/osd/labels.go @@ -21,6 +21,7 @@ import ( "strconv" "strings" + "github.com/rook/rook/pkg/operator/ceph/config" "github.com/rook/rook/pkg/operator/ceph/controller" ) @@ -47,7 +48,7 @@ func makeStorageClassDeviceSetPVCLabel(storageClassDeviceSetName, pvcStorageClas func (c *Cluster) getOSDLabels(osd OSDInfo, failureDomainValue string, portable bool) map[string]string { stringID := fmt.Sprintf("%d", osd.ID) - labels := controller.CephDaemonAppLabels(AppName, c.clusterInfo.Namespace, "osd", stringID, true) + labels := controller.CephDaemonAppLabels(AppName, c.clusterInfo.Namespace, config.OsdType, stringID, c.clusterInfo.NamespacedName().Name, "cephclusters.ceph.rook.io", true) labels[OsdIdLabelKey] = stringID labels[FailureDomainKey] = failureDomainValue labels[portableKey] = 
strconv.FormatBool(portable) diff --git a/pkg/operator/ceph/cluster/osd/update_test.go b/pkg/operator/ceph/cluster/osd/update_test.go index 3c7e952a684c..9ca86324ef14 100644 --- a/pkg/operator/ceph/cluster/osd/update_test.go +++ b/pkg/operator/ceph/cluster/osd/update_test.go @@ -519,7 +519,7 @@ func Test_getOSDUpdateInfo(t *testing.T) { addTestDeployment(clientset, "non-rook-deployment", namespace, map[string]string{}) // mon.a in this namespace - l := controller.CephDaemonAppLabels("rook-ceph-mon", namespace, "mon", "a", true) + l := controller.CephDaemonAppLabels("rook-ceph-mon", namespace, "mon", "a", "rook-ceph-operator", "cephclusters.ceph.rook.io", true) addTestDeployment(clientset, "rook-ceph-mon-a", namespace, l) // osd.1 and 3 in another namespace (another Rook cluster) diff --git a/pkg/operator/ceph/cluster/rbd/spec.go b/pkg/operator/ceph/cluster/rbd/spec.go index 7f47c16e42c2..9a410284a5de 100644 --- a/pkg/operator/ceph/cluster/rbd/spec.go +++ b/pkg/operator/ceph/cluster/rbd/spec.go @@ -32,7 +32,7 @@ func (r *ReconcileCephRBDMirror) makeDeployment(daemonConfig *daemonConfig, rbdM podSpec := v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: daemonConfig.ResourceName, - Labels: controller.CephDaemonAppLabels(AppName, rbdMirror.Namespace, config.RbdMirrorType, daemonConfig.DaemonID, true), + Labels: controller.CephDaemonAppLabels(AppName, rbdMirror.Namespace, config.RbdMirrorType, daemonConfig.DaemonID, rbdMirror.Name, "cephrbdmirrors.ceph.rook.io", true), }, Spec: v1.PodSpec{ InitContainers: []v1.Container{ @@ -75,7 +75,7 @@ func (r *ReconcileCephRBDMirror) makeDeployment(daemonConfig *daemonConfig, rbdM Name: daemonConfig.ResourceName, Namespace: rbdMirror.Namespace, Annotations: rbdMirror.Spec.Annotations, - Labels: controller.CephDaemonAppLabels(AppName, rbdMirror.Namespace, config.RbdMirrorType, daemonConfig.DaemonID, true), + Labels: controller.CephDaemonAppLabels(AppName, rbdMirror.Namespace, config.RbdMirrorType, daemonConfig.DaemonID, 
rbdMirror.Name, "cephrbdmirrors.ceph.rook.io", true), }, Spec: apps.DeploymentSpec{ Selector: &metav1.LabelSelector{ diff --git a/pkg/operator/ceph/cluster/rbd/spec_test.go b/pkg/operator/ceph/cluster/rbd/spec_test.go index 12ae65eb3ec6..c64ebd6bca7c 100644 --- a/pkg/operator/ceph/cluster/rbd/spec_test.go +++ b/pkg/operator/ceph/cluster/rbd/spec_test.go @@ -93,10 +93,10 @@ func TestPodSpec(t *testing.T) { // Deployment should have Ceph labels test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.RbdMirrorType, "a", AppName, "ns") + config.RbdMirrorType, "a", AppName, "ns", "a", "cephrbdmirrors.ceph.rook.io", "ceph-rbd-mirror") podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) podTemplate.RunFullSuite(config.RbdMirrorType, "a", AppName, "ns", "quay.io/ceph/ceph:myceph", "200", "100", "600", "300", /* resources */ - "my-priority-class") + "my-priority-class", "a", "cephrbdmirrors.ceph.rook.io", "ceph-rbd-mirror") } diff --git a/pkg/operator/ceph/controller/spec.go b/pkg/operator/ceph/controller/spec.go index 29e101170d06..e583260590f7 100644 --- a/pkg/operator/ceph/controller/spec.go +++ b/pkg/operator/ceph/controller/spec.go @@ -388,15 +388,18 @@ func AppLabels(appName, namespace string) map[string]string { } // CephDaemonAppLabels returns pod labels common to all Rook-Ceph pods which may be useful for admins. -// App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc. 
+// App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc // Daemon type is the Ceph daemon type: "mon", "mgr", "osd", "mds", "rgw" // Daemon ID is the ID portion of the Ceph daemon name: "a" for "mon.a"; "c" for "mds.c" -func CephDaemonAppLabels(appName, namespace, daemonType, daemonID string, includeNewLabels bool) map[string]string { +// ParentName is the resource metadata.name: "rook-ceph", "my-cluster", etc +// ResourceKind is the CR type: "CephCluster", "CephFilesystem", etc +func CephDaemonAppLabels(appName, namespace, daemonType, daemonID, parentName, resourceKind string, includeNewLabels bool) map[string]string { labels := AppLabels(appName, namespace) // New labels cannot be applied to match selectors during upgrade if includeNewLabels { labels[daemonTypeLabel] = daemonType + k8sutil.AddRecommendedLabels(labels, "ceph-"+daemonType, parentName, resourceKind, daemonID) } labels[DaemonIDLabel] = daemonID // Also report the daemon id keyed by its daemon type: "mon: a", "mds: c", etc. 
diff --git a/pkg/operator/ceph/file/mds/spec.go b/pkg/operator/ceph/file/mds/spec.go index aa9f17abb6c5..3a8117d590dd 100644 --- a/pkg/operator/ceph/file/mds/spec.go +++ b/pkg/operator/ceph/file/mds/spec.go @@ -155,7 +155,7 @@ func (c *Cluster) makeMdsDaemonContainer(mdsConfig *mdsConfig) v1.Container { } func (c *Cluster) podLabels(mdsConfig *mdsConfig, includeNewLabels bool) map[string]string { - labels := controller.CephDaemonAppLabels(AppName, c.fs.Namespace, "mds", mdsConfig.DaemonID, includeNewLabels) + labels := controller.CephDaemonAppLabels(AppName, c.fs.Namespace, config.MdsType, mdsConfig.DaemonID, c.fs.Name, "cephfilesystems.ceph.rook.io", includeNewLabels) labels["rook_file_system"] = c.fs.Name return labels } diff --git a/pkg/operator/ceph/file/mds/spec_test.go b/pkg/operator/ceph/file/mds/spec_test.go index 5ea991e49ecc..47278d1b761c 100644 --- a/pkg/operator/ceph/file/mds/spec_test.go +++ b/pkg/operator/ceph/file/mds/spec_test.go @@ -92,12 +92,12 @@ func TestPodSpecs(t *testing.T) { // Deployment should have Ceph labels test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.MdsType, "myfs-a", "rook-ceph-mds", "ns") + config.MdsType, "myfs-a", "rook-ceph-mds", "ns", "myfs", "cephfilesystems.ceph.rook.io", "ceph-mds") podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) podTemplate.RunFullSuite(config.MdsType, "myfs-a", "rook-ceph-mds", "ns", "quay.io/ceph/ceph:testversion", "500", "250", "4337", "2169", /* resources */ - "my-priority-class") + "my-priority-class", "myfs", "cephfilesystems.ceph.rook.io", "ceph-mds") // assert --public-addr is appended to args assert.Contains(t, d.Spec.Template.Spec.Containers[0].Args, diff --git a/pkg/operator/ceph/file/mirror/spec.go b/pkg/operator/ceph/file/mirror/spec.go index b3bcccd0ec18..20bd776c90cf 100644 --- a/pkg/operator/ceph/file/mirror/spec.go +++ b/pkg/operator/ceph/file/mirror/spec.go @@ -33,7 +33,7 @@ func (r *ReconcileFilesystemMirror) makeDeployment(daemonConfig 
*daemonConfig, f ObjectMeta: metav1.ObjectMeta{ Name: daemonConfig.ResourceName, Namespace: fsMirror.Namespace, - Labels: controller.CephDaemonAppLabels(AppName, fsMirror.Namespace, config.FilesystemMirrorType, userID, true), + Labels: controller.CephDaemonAppLabels(AppName, fsMirror.Namespace, config.FilesystemMirrorType, userID, fsMirror.Name, "cephfilesystemmirrors.ceph.rook.io", true), }, Spec: v1.PodSpec{ InitContainers: []v1.Container{ @@ -76,8 +76,7 @@ func (r *ReconcileFilesystemMirror) makeDeployment(daemonConfig *daemonConfig, f Name: daemonConfig.ResourceName, Namespace: fsMirror.Namespace, Annotations: fsMirror.Spec.Annotations, - Labels: controller.CephDaemonAppLabels(AppName, fsMirror.Namespace, config.FilesystemMirrorType, userID, true), - }, + Labels: controller.CephDaemonAppLabels(AppName, fsMirror.Namespace, config.FilesystemMirrorType, userID, fsMirror.Name, "cephfilesystemmirrors.ceph.rook.io", true)}, Spec: apps.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: podSpec.Labels, diff --git a/pkg/operator/ceph/file/mirror/spec_test.go b/pkg/operator/ceph/file/mirror/spec_test.go index 89d55325a6be..d41d5aff0b0f 100644 --- a/pkg/operator/ceph/file/mirror/spec_test.go +++ b/pkg/operator/ceph/file/mirror/spec_test.go @@ -90,10 +90,10 @@ func TestPodSpec(t *testing.T) { // Deployment should have Ceph labels test.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels, - config.FilesystemMirrorType, userID, AppName, "ns") + config.FilesystemMirrorType, userID, AppName, "ns", "fs-mirror", "cephfilesystemmirrors.ceph.rook.io", "ceph-fs-mirror") podTemplate := test.NewPodTemplateSpecTester(t, &d.Spec.Template) podTemplate.RunFullSuite(config.FilesystemMirrorType, userID, AppName, "ns", "quay.io/ceph/ceph:v16", "200", "100", "600", "300", /* resources */ - "my-priority-class") + "my-priority-class", "fs-mirror", "cephfilesystemmirrors.ceph.rook.io", "ceph-fs-mirror") } diff --git a/pkg/operator/ceph/nfs/spec.go 
b/pkg/operator/ceph/nfs/spec.go index e265e6968322..84e178c5a7f0 100644 --- a/pkg/operator/ceph/nfs/spec.go +++ b/pkg/operator/ceph/nfs/spec.go @@ -248,7 +248,7 @@ func (r *ReconcileCephNFS) dbusContainer(nfs *cephv1.CephNFS) v1.Container { } func getLabels(n *cephv1.CephNFS, name string, includeNewLabels bool) map[string]string { - labels := controller.CephDaemonAppLabels(AppName, n.Namespace, "nfs", name, includeNewLabels) + labels := controller.CephDaemonAppLabels(AppName, n.Namespace, "nfs", n.Name+"-"+name, n.Name, "cephnfses.ceph.rook.io", includeNewLabels) labels["ceph_nfs"] = n.Name labels["instance"] = name return labels diff --git a/pkg/operator/ceph/object/spec.go b/pkg/operator/ceph/object/spec.go index ee979ed69160..2152b3275bd4 100644 --- a/pkg/operator/ceph/object/spec.go +++ b/pkg/operator/ceph/object/spec.go @@ -27,6 +27,7 @@ import ( "github.com/pkg/errors" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/daemon/ceph/osd/kms" + "github.com/rook/rook/pkg/operator/ceph/config" cephconfig "github.com/rook/rook/pkg/operator/ceph/config" "github.com/rook/rook/pkg/operator/ceph/controller" cephver "github.com/rook/rook/pkg/operator/ceph/version" @@ -598,7 +599,7 @@ func addPortToEndpoint(endpoints *v1.Endpoints, name string, port int32) { } func getLabels(name, namespace string, includeNewLabels bool) map[string]string { - labels := controller.CephDaemonAppLabels(AppName, namespace, "rgw", name, includeNewLabels) + labels := controller.CephDaemonAppLabels(AppName, namespace, config.RgwType, name, name, "cephobjectstores.ceph.rook.io", includeNewLabels) labels["rook_object_store"] = name return labels } diff --git a/pkg/operator/ceph/object/spec_test.go b/pkg/operator/ceph/object/spec_test.go index 9208b7ed6bf0..28ea5c97047e 100644 --- a/pkg/operator/ceph/object/spec_test.go +++ b/pkg/operator/ceph/object/spec_test.go @@ -71,6 +71,7 @@ func TestPodSpecs(t *testing.T) { resourceName := fmt.Sprintf("%s-%s", AppName, 
c.store.Name) rgwConfig := &rgwConfig{ ResourceName: resourceName, + DaemonID: "default", } s, err := c.makeRGWPodSpec(rgwConfig) @@ -87,7 +88,7 @@ func TestPodSpecs(t *testing.T) { podTemplate := cephtest.NewPodTemplateSpecTester(t, &s) podTemplate.RunFullSuite(cephconfig.RgwType, "default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", "200", "100", "1337", "500", /* resources */ - "my-priority-class") + "my-priority-class", "default", "cephobjectstores.ceph.rook.io", "ceph-rgw") } func TestSSLPodSpec(t *testing.T) { @@ -130,6 +131,7 @@ func TestSSLPodSpec(t *testing.T) { resourceName := fmt.Sprintf("%s-%s", AppName, c.store.Name) rgwConfig := &rgwConfig{ ResourceName: resourceName, + DaemonID: "default", } _, err := c.makeRGWPodSpec(rgwConfig) // No TLS certs specified, will return error @@ -158,7 +160,7 @@ func TestSSLPodSpec(t *testing.T) { podTemplate := cephtest.NewPodTemplateSpecTester(t, &s) podTemplate.RunFullSuite(cephconfig.RgwType, "default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", "200", "100", "1337", "500", /* resources */ - "my-priority-class") + "my-priority-class", "default", "cephobjectstores.ceph.rook.io", "ceph-rgw") // TLS Secret c.store.Spec.Gateway.SSLCertificateRef = "tlscert" rgwtlssecret = &v1.Secret{ @@ -182,7 +184,7 @@ func TestSSLPodSpec(t *testing.T) { podTemplate = cephtest.NewPodTemplateSpecTester(t, &s) podTemplate.RunFullSuite(cephconfig.RgwType, "default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", "200", "100", "1337", "500", /* resources */ - "my-priority-class") + "my-priority-class", "default", "cephobjectstores.ceph.rook.io", "ceph-rgw") // Using service serving cert c.store.Spec.Gateway.SSLCertificateRef = "" c.store.Spec.Gateway.Service = &(cephv1.RGWServiceSpec{Annotations: cephv1.Annotations{cephv1.ServiceServingCertKey: "rgw-cert"}}) @@ -212,7 +214,7 @@ func TestSSLPodSpec(t *testing.T) { podTemplate = cephtest.NewPodTemplateSpecTester(t, &s) 
podTemplate.RunFullSuite(cephconfig.RgwType, "default", "rook-ceph-rgw", "mycluster", "quay.io/ceph/ceph:myversion", "200", "100", "1337", "500", /* resources */ - "my-priority-class") + "my-priority-class", "default", "cephobjectstores.ceph.rook.io", "ceph-rgw") assert.True(t, s.Spec.HostNetwork) assert.Equal(t, v1.DNSClusterFirstWithHostNet, s.Spec.DNSPolicy) diff --git a/pkg/operator/ceph/test/podtemplatespec.go b/pkg/operator/ceph/test/podtemplatespec.go index e3f97f81586e..f39ceed57b9f 100644 --- a/pkg/operator/ceph/test/podtemplatespec.go +++ b/pkg/operator/ceph/test/podtemplatespec.go @@ -37,10 +37,10 @@ func NewPodTemplateSpecTester(t *testing.T, template *v1.PodTemplateSpec) *PodTe // AssertLabelsContainCephRequirements asserts that the PodTemplateSpec under test contains labels // which all Ceph pods should have. func (pt *PodTemplateSpecTester) AssertLabelsContainCephRequirements( - daemonType, daemonID, appName, namespace string, + daemonType, daemonID, appName, namespace, parentName, resourceKind, appBinaryName string, ) { AssertLabelsContainCephRequirements(pt.t, pt.template.ObjectMeta.Labels, - daemonType, daemonID, appName, namespace) + daemonType, daemonID, appName, namespace, parentName, resourceKind, appBinaryName) } // RunFullSuite runs all assertion tests for the PodTemplateSpec under test and its sub-resources. 
@@ -48,8 +48,8 @@ func (pt *PodTemplateSpecTester) RunFullSuite( daemonType, daemonID, appName, namespace, cephImage, cpuResourceLimit, cpuResourceRequest, memoryResourceLimit, memoryResourceRequest string, - priorityClassName string, + priorityClassName, parentName, resourceKind, appBinaryName string, ) { - pt.AssertLabelsContainCephRequirements(daemonType, daemonID, appName, namespace) + pt.AssertLabelsContainCephRequirements(daemonType, daemonID, appName, namespace, parentName, resourceKind, appBinaryName) pt.Spec().RunFullSuite(daemonType, daemonID, cephImage, cpuResourceLimit, cpuResourceRequest, memoryResourceLimit, memoryResourceRequest, priorityClassName) } diff --git a/pkg/operator/ceph/test/spec.go b/pkg/operator/ceph/test/spec.go index f07eed51c929..417c25afc778 100644 --- a/pkg/operator/ceph/test/spec.go +++ b/pkg/operator/ceph/test/spec.go @@ -18,6 +18,7 @@ package test import ( "fmt" + "os" "strings" "testing" @@ -74,7 +75,7 @@ func VerifyPodLabels(appName, namespace, daemonType, daemonID string, labels map // DaemonSets, etc. 
func AssertLabelsContainCephRequirements( t *testing.T, labels map[string]string, - daemonType, daemonID, appName, namespace string, + daemonType, daemonID, appName, namespace, parentName, resourceKind, appBinaryName string, ) { optest.AssertLabelsContainRookRequirements(t, labels, appName) @@ -83,8 +84,15 @@ func AssertLabelsContainCephRequirements( resourceLabels = append(resourceLabels, fmt.Sprintf("%s=%s", k, v)) } expectedLabels := []string{ + "app.kubernetes.io/created-by=rook-ceph-operator", + "app.kubernetes.io/component=" + resourceKind, + "app.kubernetes.io/instance=" + daemonID, + "app.kubernetes.io/name=" + appBinaryName, + "app.kubernetes.io/managed-by=rook-ceph-operator", + "app.kubernetes.io/part-of=" + parentName, "ceph_daemon_id=" + daemonID, string(daemonType) + "=" + daemonID, + "rook.io/operator-namespace=" + os.Getenv("POD_NAMESPACE"), "rook_cluster" + "=" + namespace, } assert.Subset(t, resourceLabels, expectedLabels, diff --git a/pkg/operator/discover/discover.go b/pkg/operator/discover/discover.go index c77e0ce7ed8b..028e15a25282 100644 --- a/pkg/operator/discover/discover.go +++ b/pkg/operator/discover/discover.go @@ -88,10 +88,8 @@ func (d *Discover) createDiscoverDaemonSet(ctx context.Context, namespace, disco ds := &apps.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ - Name: discoverDaemonsetName, - Labels: map[string]string{ - "app": discoverDaemonsetName, - }, + Name: discoverDaemonsetName, + Labels: getLabels(), }, Spec: apps.DaemonSetSpec{ Selector: &metav1.LabelSelector{ @@ -104,9 +102,7 @@ func (d *Discover) createDiscoverDaemonSet(ctx context.Context, namespace, disco }, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app": discoverDaemonsetName, - }, + Labels: getLabels(), }, Spec: v1.PodSpec{ ServiceAccountName: securityAccount, @@ -241,6 +237,13 @@ func (d *Discover) createDiscoverDaemonSet(ctx context.Context, namespace, disco } +func getLabels() map[string]string { + labels := 
make(map[string]string) + k8sutil.AddRecommendedLabels(labels, "rook-discover", "rook-ceph-operator", "rook-discover", "rook-discover") + labels["app"] = discoverDaemonsetName + return labels +} + func getEnvVar(varName string, defaultValue string) string { envValue := os.Getenv(varName) if envValue != "" { diff --git a/pkg/operator/k8sutil/labels.go b/pkg/operator/k8sutil/labels.go index b41537fb9989..c17c6980a6c5 100644 --- a/pkg/operator/k8sutil/labels.go +++ b/pkg/operator/k8sutil/labels.go @@ -17,6 +17,7 @@ limitations under the License. package k8sutil import ( + "os" "strings" ) @@ -45,3 +46,15 @@ func ParseStringToLabels(in string) map[string]string { return labels } + +// AddRecommendedLabels adds the Kubernetes recommended labels to resources created by Rook. +// The labels added are app.kubernetes.io/name, instance, component, part-of, etc. +func AddRecommendedLabels(labels map[string]string, appName, parentName, resourceKind, resourceInstance string) { + labels["app.kubernetes.io/name"] = appName + labels["app.kubernetes.io/instance"] = resourceInstance + labels["app.kubernetes.io/component"] = resourceKind + labels["app.kubernetes.io/part-of"] = parentName + labels["app.kubernetes.io/managed-by"] = "rook-ceph-operator" + labels["app.kubernetes.io/created-by"] = "rook-ceph-operator" + labels["rook.io/operator-namespace"] = os.Getenv(PodNamespaceEnvVar) +}