From c2a01c046aa9b1e235a5febebe3e0895c3a344a5 Mon Sep 17 00:00:00 2001
From: Madhu Rajanna
Date: Mon, 14 Feb 2022 12:29:01 +0530
Subject: [PATCH] radosnamespace: add new CRD

This introduces a new CRD that adds the ability to create a rados
namespace in a given Ceph block pool. Typically the pool name is the
name of the CephBlockPool CR created by Rook.

Closes: #7035
Signed-off-by: Madhu Rajanna
---
 .commitlintrc.json                            |   1 +
 .github/workflows/canary-integration-test.yml |   5 +
 Documentation/ceph-pool-radosnamespace.md     |  44 +++
 .../rook-ceph/templates/clusterrole.yaml      |   3 +
 .../charts/rook-ceph/templates/resources.yaml |  89 +++++
 deploy/examples/common.yaml                   |   3 +
 deploy/examples/crds.yaml                     |  88 +++++
 deploy/examples/radosnamespace.yaml           |   9 +
 deploy/olm/assemble/metadata-common.yaml      |   5 +
 pkg/apis/ceph.rook.io/v1/pool.go              |   4 +
 pkg/apis/ceph.rook.io/v1/register.go          |   2 +
 pkg/apis/ceph.rook.io/v1/types.go             |  46 ++-
 .../ceph.rook.io/v1/zz_generated.deepcopy.go  | 111 +++++++
 .../ceph.rook.io/v1/ceph.rook.io_client.go    |   5 +
 .../v1/cephblockpoolradosnamespace.go         | 178 ++++++++++
 .../v1/fake/fake_ceph.rook.io_client.go       |   4 +
 .../fake/fake_cephblockpoolradosnamespace.go  | 130 ++++++++
 .../ceph.rook.io/v1/generated_expansion.go    |   2 +
 .../v1/cephblockpoolradosnamespace.go         |  90 +++++
 .../ceph.rook.io/v1/interface.go              |   7 +
 .../informers/externalversions/generic.go     |   2 +
 .../v1/cephblockpoolradosnamespace.go         |  99 ++++++
 .../ceph.rook.io/v1/expansion_generated.go    |   8 +
 pkg/daemon/ceph/client/radosnamespace.go      |  97 ++++++
 pkg/operator/ceph/cluster/dependents.go       |   1 +
 pkg/operator/ceph/controller/predicate.go     |  25 ++
 pkg/operator/ceph/cr_manager.go               |   2 +
 pkg/operator/ceph/pool/controller.go          |  15 +-
 pkg/operator/ceph/pool/dependents.go          |  49 +++
 pkg/operator/ceph/pool/dependents_test.go     |  93 ++++++
 .../ceph/pool/radosnamespace/controller.go    | 309 ++++++++++++++++++
 .../pool/radosnamespace/controller_test.go    | 288 ++++++++++++++++
 tests/scripts/github-action-helper.sh         |   1 +
 33 files changed, 1813 insertions(+), 2 deletions(-)
 create mode 100644 Documentation/ceph-pool-radosnamespace.md
 create mode 100644 deploy/examples/radosnamespace.yaml
 create mode 100644 pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpoolradosnamespace.go
 create mode 100644 pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephblockpoolradosnamespace.go
 create mode 100644 pkg/client/informers/externalversions/ceph.rook.io/v1/cephblockpoolradosnamespace.go
 create mode 100644 pkg/client/listers/ceph.rook.io/v1/cephblockpoolradosnamespace.go
 create mode 100644 pkg/daemon/ceph/client/radosnamespace.go
 create mode 100644 pkg/operator/ceph/pool/dependents.go
 create mode 100644 pkg/operator/ceph/pool/dependents_test.go
 create mode 100644 pkg/operator/ceph/pool/radosnamespace/controller.go
 create mode 100644 pkg/operator/ceph/pool/radosnamespace/controller_test.go

diff --git a/.commitlintrc.json b/.commitlintrc.json
index 3c36aaa84fbda..f9a5d00601c19 100644
--- a/.commitlintrc.json
+++ b/.commitlintrc.json
@@ -30,6 +30,7 @@
         "rgw",
         "security",
         "subvolumegroup",
+        "radosnamespace",
         "test"
       ]
     ],
diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index cc90a25a967d6..2643312296fe5 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -91,6 +91,11 @@ jobs:
          toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
          timeout 60 sh -c "until kubectl -n rook-ceph exec $toolbox -- ceph fs subvolumegroup ls myfs|jq .[0].name|grep -q "group-a"; do sleep 1 && echo 'waiting for the subvolumegroup to be created'; done"

+      - name: wait for the rados namespace to be created
+        run: |
+          toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
+          timeout 60 sh -c "until kubectl -n rook-ceph exec $toolbox -- rbd namespace ls replicapool|jq .[0].name|grep -q "namespace-a"; do sleep 1 && echo 'waiting for the rados namespace to be created'; done"
+
      - name: test external script with restricted_auth_permission flag and without having cephfs_filesystem flag
        run: |
          toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
diff --git a/Documentation/ceph-pool-radosnamespace.md b/Documentation/ceph-pool-radosnamespace.md
new file mode 100644
index 0000000000000..52df33df66d1a
--- /dev/null
+++ b/Documentation/ceph-pool-radosnamespace.md
@@ -0,0 +1,44 @@
+---
+title: RadosNamespace CRD
+weight: 3610
+indent: true
+---
+
+{% include_relative branch.liquid %}
+
+This guide assumes you have created a Rook cluster as explained in the main [Quickstart guide](quickstart.md).
+
+# CephBlockPoolRadosNamespace CRD
+
+Rook allows the creation of rados namespaces in a Ceph block pool through the CephBlockPoolRadosNamespace custom resource definition (CRD). A rados namespace is a logical partition within a block pool: RBD images created in one namespace are isolated from images in the other namespaces of the same pool. For more information about rados namespaces, refer to the [Ceph docs](https://docs.ceph.com/en/latest/man/8/rbd/).
+
+## Creating a rados namespace
+
+To get you started, here is a simple example of a CR to create a CephBlockPoolRadosNamespace on the CephBlockPool "replicapool".
+
+```yaml
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPoolRadosNamespace
+metadata:
+  name: namespace-a
+  namespace: rook-ceph # namespace:cluster
+spec:
+  # blockpoolName is the name of Ceph BlockPool. Typically it's the name of the CephBlockPool CR.
+  blockpoolName: replicapool
+```
+
+## Settings
+
+If any setting is unspecified, a suitable default will be used automatically.
+
+### CephBlockPoolRadosNamespace metadata
+
+- `name`: The name that will be used for the Ceph BlockPool rados namespace.
+
+### CephBlockPoolRadosNamespace spec
+
+- `blockpoolName`: The metadata name of the CephBlockPool CR where the rados namespace will be created.
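Applying the example above with `kubectl create -f radosnamespace.yaml` is the normal workflow. Purely as an illustration of the typed clientset generated later in this patch, the same CR can also be created from Go. This is a minimal sketch, not part of the patch itself; the kubeconfig location and the `rook-ceph`/`replicapool`/`namespace-a` names are assumptions carried over from the example:

```go
package main

import (
	"context"
	"fmt"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	rookclient "github.com/rook/rook/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig from the default location (an assumption for this sketch).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := rookclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Mirrors the YAML example: a rados namespace "namespace-a" in pool "replicapool".
	radosNamespace := &cephv1.CephBlockPoolRadosNamespace{
		ObjectMeta: metav1.ObjectMeta{Name: "namespace-a", Namespace: "rook-ceph"},
		Spec:       cephv1.CephBlockPoolRadosNamespaceSpec{BlockPoolName: "replicapool"},
	}
	created, err := clientset.CephV1().CephBlockPoolRadosNamespaces("rook-ceph").Create(
		context.TODO(), radosNamespace, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("created CephBlockPoolRadosNamespace %s/%s\n", created.Namespace, created.Name)
}
```

The operator then reconciles the CR and runs the equivalent of `rbd namespace create replicapool/namespace-a` against the cluster, as the canary workflow step above verifies.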
diff --git a/deploy/charts/rook-ceph/templates/clusterrole.yaml b/deploy/charts/rook-ceph/templates/clusterrole.yaml index a61cd8a2440cf..44187988f97da 100644 --- a/deploy/charts/rook-ceph/templates/clusterrole.yaml +++ b/deploy/charts/rook-ceph/templates/clusterrole.yaml @@ -136,6 +136,7 @@ rules: - cephrbdmirrors - cephfilesystemmirrors - cephfilesystemsubvolumegroups + - cephblockpoolradosnamespaces verbs: - get - list @@ -160,6 +161,7 @@ rules: - cephrbdmirrors/status - cephfilesystemmirrors/status - cephfilesystemsubvolumegroups/status + - cephblockpoolradosnamespaces/status verbs: ["update"] # The "*/finalizers" permission may need to be strictly given for K8s clusters where # OwnerReferencesPermissionEnforcement is enabled so that Rook can set blockOwnerDeletion on @@ -182,6 +184,7 @@ rules: - cephrbdmirrors/finalizers - cephfilesystemmirrors/finalizers - cephfilesystemsubvolumegroups/finalizers + - cephblockpoolradosnamespaces/finalizers verbs: ["update"] - apiGroups: - policy diff --git a/deploy/charts/rook-ceph/templates/resources.yaml b/deploy/charts/rook-ceph/templates/resources.yaml index c18ace07f57dc..8c69669f6b2e9 100644 --- a/deploy/charts/rook-ceph/templates/resources.yaml +++ b/deploy/charts/rook-ceph/templates/resources.yaml @@ -1,6 +1,73 @@ {{- if .Values.crds.enabled }} apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + helm.sh/resource-policy: keep + creationTimestamp: null + name: cephblockpoolradosnamespaces.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephBlockPoolRadosNamespace + listKind: CephBlockPoolRadosNamespaceList + plural: cephblockpoolradosnamespaces + singular: cephblockpoolradosnamespace + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephBlockPoolRadosNamespace represents a Ceph BlockPool Rados Namespace + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec represents the specification of a Ceph BlockPool Rados Namespace + properties: + blockpoolName: + description: BlockPoolName is the name of Ceph BlockPool. Typically it's the name of the CephBlockPool CR. 
+ type: string + required: + - blockpoolName + type: object + status: + description: Status represents the status of a CephBlockPool Rados Namespace + properties: + info: + additionalProperties: + type: string + nullable: true + type: object + phase: + description: ConditionType represent a resource's status + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c @@ -200,6 +267,28 @@ spec: status: description: CephBlockPoolStatus represents the mirroring status of Ceph Storage Pool properties: + conditions: + items: + description: Condition represents a status condition on any Rook-Ceph Custom Resource. + properties: + lastHeartbeatTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a reason for a condition + type: string + status: + type: string + type: + description: ConditionType represent a resource's status + type: string + type: object + type: array info: additionalProperties: type: string diff --git a/deploy/examples/common.yaml b/deploy/examples/common.yaml index b560c9adc7fe2..68d0ce4f565bb 100644 --- a/deploy/examples/common.yaml +++ b/deploy/examples/common.yaml @@ -312,6 +312,7 @@ rules: - cephrbdmirrors - cephfilesystemmirrors - cephfilesystemsubvolumegroups + - cephblockpoolradosnamespaces verbs: - get - list @@ -336,6 +337,7 @@ rules: - cephrbdmirrors/status - cephfilesystemmirrors/status - cephfilesystemsubvolumegroups/status + - cephblockpoolradosnamespaces/status verbs: ["update"] # The "*/finalizers" permission may need to be strictly given for K8s clusters where # OwnerReferencesPermissionEnforcement is enabled so that Rook can set blockOwnerDeletion on @@ -358,6 +360,7 @@ rules: - cephrbdmirrors/finalizers - cephfilesystemmirrors/finalizers - cephfilesystemsubvolumegroups/finalizers + - cephblockpoolradosnamespaces/finalizers verbs: ["update"] - apiGroups: - policy diff --git a/deploy/examples/crds.yaml b/deploy/examples/crds.yaml index e8239aceaea49..9780eb455c70d 100644 --- a/deploy/examples/crds.yaml +++ b/deploy/examples/crds.yaml @@ -5,6 +5,72 @@ --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c + creationTimestamp: null + name: cephblockpoolradosnamespaces.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephBlockPoolRadosNamespace + listKind: CephBlockPoolRadosNamespaceList + plural: cephblockpoolradosnamespaces + singular: cephblockpoolradosnamespace + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CephBlockPoolRadosNamespace represents a Ceph BlockPool Rados Namespace + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: Spec represents the specification of a Ceph BlockPool Rados Namespace
+            properties:
+              blockpoolName:
+                description: BlockPoolName is the name of Ceph BlockPool. Typically it's the name of the CephBlockPool CR.
+                type: string
+            required:
+            - blockpoolName
+            type: object
+          status:
+            description: Status represents the status of a CephBlockPool Rados Namespace
+            properties:
+              info:
+                additionalProperties:
+                  type: string
+                nullable: true
+                type: object
+              phase:
+                description: ConditionType represent a resource's status
+                type: string
+            type: object
+            x-kubernetes-preserve-unknown-fields: true
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
+---
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
     controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
@@ -203,6 +269,28 @@ spec:
     status:
       description: CephBlockPoolStatus represents the mirroring status of Ceph Storage Pool
       properties:
+        conditions:
+          items:
+            description: Condition represents a status condition on any Rook-Ceph Custom Resource.
+            properties:
+              lastHeartbeatTime:
+                format: date-time
+                type: string
+              lastTransitionTime:
+                format: date-time
+                type: string
+              message:
+                type: string
+              reason:
+                description: ConditionReason is a reason for a condition
+                type: string
+              status:
+                type: string
+              type:
+                description: ConditionType represent a resource's status
+                type: string
+            type: object
+          type: array
         info:
           additionalProperties:
             type: string
diff --git a/deploy/examples/radosnamespace.yaml b/deploy/examples/radosnamespace.yaml
new file mode 100644
index 0000000000000..129dc37d05d67
--- /dev/null
+++ b/deploy/examples/radosnamespace.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPoolRadosNamespace
+metadata:
+  name: namespace-a
+  namespace: rook-ceph # namespace:cluster
+spec:
+  # blockpoolName is the name of Ceph BlockPool. Typically it's the name of the CephBlockPool CR.
+  blockpoolName: replicapool
diff --git a/deploy/olm/assemble/metadata-common.yaml b/deploy/olm/assemble/metadata-common.yaml
index 52c17383a7ded..cf4be72800c73 100644
--- a/deploy/olm/assemble/metadata-common.yaml
+++ b/deploy/olm/assemble/metadata-common.yaml
@@ -168,6 +168,11 @@ spec:
         version: v1
         displayName: Ceph Filesystem SubVolumeGroup
         description: Represents a Ceph Filesystem SubVolumeGroup.
+      - kind: CephBlockPoolRadosNamespace
+        name: cephblockpoolradosnamespaces.ceph.rook.io
+        version: v1
+        displayName: Ceph BlockPool Rados Namespace
+        description: Represents a Ceph BlockPool Rados Namespace.
displayName: Rook-Ceph description: | diff --git a/pkg/apis/ceph.rook.io/v1/pool.go b/pkg/apis/ceph.rook.io/v1/pool.go index 4cd0ee27b9e54..622fa3e52bb97 100644 --- a/pkg/apis/ceph.rook.io/v1/pool.go +++ b/pkg/apis/ceph.rook.io/v1/pool.go @@ -118,6 +118,10 @@ func (p *CephBlockPool) ValidateDelete() error { return nil } +func (p *CephBlockPool) GetStatusConditions() *[]Condition { + return &p.Status.Conditions +} + // SnapshotSchedulesEnabled returns whether snapshot schedules are desired func (p *MirroringSpec) SnapshotSchedulesEnabled() bool { return len(p.SnapshotSchedules) > 0 diff --git a/pkg/apis/ceph.rook.io/v1/register.go b/pkg/apis/ceph.rook.io/v1/register.go index ef6b5aca117cf..928fd1aff5538 100644 --- a/pkg/apis/ceph.rook.io/v1/register.go +++ b/pkg/apis/ceph.rook.io/v1/register.go @@ -85,6 +85,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &CephFilesystemMirrorList{}, &CephFilesystemSubVolumeGroup{}, &CephFilesystemSubVolumeGroupList{}, + &CephBlockPoolRadosNamespace{}, + &CephBlockPoolRadosNamespaceList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) scheme.AddKnownTypes(bktv1alpha1.SchemeGroupVersion, diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go index e9ca6c1dc05bd..311c0a5107c64 100755 --- a/pkg/apis/ceph.rook.io/v1/types.go +++ b/pkg/apis/ceph.rook.io/v1/types.go @@ -683,7 +683,8 @@ type CephBlockPoolStatus struct { SnapshotScheduleStatus *SnapshotScheduleStatusSpec `json:"snapshotScheduleStatus,omitempty"` // +optional // +nullable - Info map[string]string `json:"info,omitempty"` + Info map[string]string `json:"info,omitempty"` + Conditions []Condition `json:"conditions,omitempty"` } // MirroringStatusSpec is the status of the pool mirroring @@ -2358,3 +2359,46 @@ type CephFilesystemSubVolumeGroupStatus struct { // +nullable Info map[string]string `json:"info,omitempty"` } + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephBlockPoolRadosNamespace represents a Ceph BlockPool Rados Namespace +// +kubebuilder:subresource:status +type CephBlockPoolRadosNamespace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + // Spec represents the specification of a Ceph BlockPool Rados Namespace + Spec CephBlockPoolRadosNamespaceSpec `json:"spec"` + // Status represents the status of a CephBlockPool Rados Namespace + // +kubebuilder:pruning:PreserveUnknownFields + // +optional + Status *CephBlockPoolRadosNamespaceStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CephBlockPoolRadosNamespaceList represents a list of Ceph BlockPool Rados Namespace +type CephBlockPoolRadosNamespaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []CephBlockPoolRadosNamespace `json:"items"` +} + +// CephBlockPoolRadosNamespaceSpec represents the specification of a CephBlockPool Rados Namespace +type CephBlockPoolRadosNamespaceSpec struct { + // BlockPoolName is the name of Ceph BlockPool. Typically it's the name of + // the CephBlockPool CR. 
+ BlockPoolName string `json:"blockpoolName"` +} + +// CephBlockPoolRadosNamespaceStatus represents the Status of Ceph BlockPool +// Rados Namespace +type CephBlockPoolRadosNamespaceStatus struct { + // +optional + Phase ConditionType `json:"phase,omitempty"` + // +optional + // +nullable + Info map[string]string `json:"info,omitempty"` +} diff --git a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go index c12ac3c179437..575d29699aa62 100644 --- a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go +++ b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go @@ -289,6 +289,110 @@ func (in *CephBlockPoolList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephBlockPoolRadosNamespace) DeepCopyInto(out *CephBlockPoolRadosNamespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(CephBlockPoolRadosNamespaceStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPoolRadosNamespace. +func (in *CephBlockPoolRadosNamespace) DeepCopy() *CephBlockPoolRadosNamespace { + if in == nil { + return nil + } + out := new(CephBlockPoolRadosNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephBlockPoolRadosNamespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephBlockPoolRadosNamespaceList) DeepCopyInto(out *CephBlockPoolRadosNamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CephBlockPoolRadosNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPoolRadosNamespaceList. +func (in *CephBlockPoolRadosNamespaceList) DeepCopy() *CephBlockPoolRadosNamespaceList { + if in == nil { + return nil + } + out := new(CephBlockPoolRadosNamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CephBlockPoolRadosNamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephBlockPoolRadosNamespaceSpec) DeepCopyInto(out *CephBlockPoolRadosNamespaceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPoolRadosNamespaceSpec. +func (in *CephBlockPoolRadosNamespaceSpec) DeepCopy() *CephBlockPoolRadosNamespaceSpec { + if in == nil { + return nil + } + out := new(CephBlockPoolRadosNamespaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephBlockPoolRadosNamespaceStatus) DeepCopyInto(out *CephBlockPoolRadosNamespaceStatus) { + *out = *in + if in.Info != nil { + in, out := &in.Info, &out.Info + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephBlockPoolRadosNamespaceStatus. +func (in *CephBlockPoolRadosNamespaceStatus) DeepCopy() *CephBlockPoolRadosNamespaceStatus { + if in == nil { + return nil + } + out := new(CephBlockPoolRadosNamespaceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CephBlockPoolStatus) DeepCopyInto(out *CephBlockPoolStatus) { *out = *in @@ -314,6 +418,13 @@ func (in *CephBlockPoolStatus) DeepCopyInto(out *CephBlockPoolStatus) { (*out)[key] = val } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go index c4d622edabbd9..394dc08e1bf88 100644 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go +++ b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go @@ -27,6 +27,7 @@ import ( type CephV1Interface interface { RESTClient() rest.Interface CephBlockPoolsGetter + CephBlockPoolRadosNamespacesGetter CephBucketNotificationsGetter CephBucketTopicsGetter CephClientsGetter @@ -52,6 +53,10 @@ func (c *CephV1Client) CephBlockPools(namespace string) CephBlockPoolInterface { return newCephBlockPools(c, namespace) } +func (c *CephV1Client) CephBlockPoolRadosNamespaces(namespace string) CephBlockPoolRadosNamespaceInterface { + return newCephBlockPoolRadosNamespaces(c, namespace) +} + func (c *CephV1Client) CephBucketNotifications(namespace string) CephBucketNotificationInterface { return newCephBucketNotifications(c, namespace) } diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpoolradosnamespace.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpoolradosnamespace.go new file mode 100644 index 0000000000000..0883cb40bf1ce --- /dev/null +++ b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpoolradosnamespace.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephBlockPoolRadosNamespacesGetter has a method to return a CephBlockPoolRadosNamespaceInterface. +// A group's client should implement this interface. +type CephBlockPoolRadosNamespacesGetter interface { + CephBlockPoolRadosNamespaces(namespace string) CephBlockPoolRadosNamespaceInterface +} + +// CephBlockPoolRadosNamespaceInterface has methods to work with CephBlockPoolRadosNamespace resources. +type CephBlockPoolRadosNamespaceInterface interface { + Create(ctx context.Context, cephBlockPoolRadosNamespace *v1.CephBlockPoolRadosNamespace, opts metav1.CreateOptions) (*v1.CephBlockPoolRadosNamespace, error) + Update(ctx context.Context, cephBlockPoolRadosNamespace *v1.CephBlockPoolRadosNamespace, opts metav1.UpdateOptions) (*v1.CephBlockPoolRadosNamespace, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephBlockPoolRadosNamespace, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephBlockPoolRadosNamespaceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBlockPoolRadosNamespace, err error) + CephBlockPoolRadosNamespaceExpansion +} + +// cephBlockPoolRadosNamespaces implements CephBlockPoolRadosNamespaceInterface +type cephBlockPoolRadosNamespaces struct { + client rest.Interface + ns string +} + +// newCephBlockPoolRadosNamespaces returns a CephBlockPoolRadosNamespaces +func newCephBlockPoolRadosNamespaces(c *CephV1Client, namespace string) *cephBlockPoolRadosNamespaces { + return &cephBlockPoolRadosNamespaces{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephBlockPoolRadosNamespace, and returns the corresponding cephBlockPoolRadosNamespace object, and an error if there is any. +func (c *cephBlockPoolRadosNamespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephBlockPoolRadosNamespace, err error) { + result = &v1.CephBlockPoolRadosNamespace{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephBlockPoolRadosNamespaces that match those selectors. +func (c *cephBlockPoolRadosNamespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephBlockPoolRadosNamespaceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephBlockPoolRadosNamespaceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephBlockPoolRadosNamespaces. 
+func (c *cephBlockPoolRadosNamespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephBlockPoolRadosNamespace and creates it. Returns the server's representation of the cephBlockPoolRadosNamespace, and an error, if there is any. +func (c *cephBlockPoolRadosNamespaces) Create(ctx context.Context, cephBlockPoolRadosNamespace *v1.CephBlockPoolRadosNamespace, opts metav1.CreateOptions) (result *v1.CephBlockPoolRadosNamespace, err error) { + result = &v1.CephBlockPoolRadosNamespace{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephBlockPoolRadosNamespace). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephBlockPoolRadosNamespace and updates it. Returns the server's representation of the cephBlockPoolRadosNamespace, and an error, if there is any. +func (c *cephBlockPoolRadosNamespaces) Update(ctx context.Context, cephBlockPoolRadosNamespace *v1.CephBlockPoolRadosNamespace, opts metav1.UpdateOptions) (result *v1.CephBlockPoolRadosNamespace, err error) { + result = &v1.CephBlockPoolRadosNamespace{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + Name(cephBlockPoolRadosNamespace.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephBlockPoolRadosNamespace). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephBlockPoolRadosNamespace and deletes it. Returns an error if one occurs. +func (c *cephBlockPoolRadosNamespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephBlockPoolRadosNamespaces) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephBlockPoolRadosNamespace. +func (c *cephBlockPoolRadosNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBlockPoolRadosNamespace, err error) { + result = &v1.CephBlockPoolRadosNamespace{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_ceph.rook.io_client.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_ceph.rook.io_client.go index ff9c8aa8cf9b2..c92c4e76866ea 100644 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_ceph.rook.io_client.go +++ b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_ceph.rook.io_client.go @@ -32,6 +32,10 @@ func (c *FakeCephV1) CephBlockPools(namespace string) v1.CephBlockPoolInterface return &FakeCephBlockPools{c, namespace} } +func (c *FakeCephV1) CephBlockPoolRadosNamespaces(namespace string) v1.CephBlockPoolRadosNamespaceInterface { + return &FakeCephBlockPoolRadosNamespaces{c, namespace} +} + func (c *FakeCephV1) CephBucketNotifications(namespace string) v1.CephBucketNotificationInterface { return &FakeCephBucketNotifications{c, namespace} } diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephblockpoolradosnamespace.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephblockpoolradosnamespace.go new file mode 100644 index 0000000000000..5281ead498f87 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/fake/fake_cephblockpoolradosnamespace.go @@ -0,0 +1,130 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCephBlockPoolRadosNamespaces implements CephBlockPoolRadosNamespaceInterface +type FakeCephBlockPoolRadosNamespaces struct { + Fake *FakeCephV1 + ns string +} + +var cephblockpoolradosnamespacesResource = schema.GroupVersionResource{Group: "ceph.rook.io", Version: "v1", Resource: "cephblockpoolradosnamespaces"} + +var cephblockpoolradosnamespacesKind = schema.GroupVersionKind{Group: "ceph.rook.io", Version: "v1", Kind: "CephBlockPoolRadosNamespace"} + +// Get takes name of the cephBlockPoolRadosNamespace, and returns the corresponding cephBlockPoolRadosNamespace object, and an error if there is any. +func (c *FakeCephBlockPoolRadosNamespaces) Get(ctx context.Context, name string, options v1.GetOptions) (result *cephrookiov1.CephBlockPoolRadosNamespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(cephblockpoolradosnamespacesResource, c.ns, name), &cephrookiov1.CephBlockPoolRadosNamespace{}) + + if obj == nil { + return nil, err + } + return obj.(*cephrookiov1.CephBlockPoolRadosNamespace), err +} + +// List takes label and field selectors, and returns the list of CephBlockPoolRadosNamespaces that match those selectors. 
+func (c *FakeCephBlockPoolRadosNamespaces) List(ctx context.Context, opts v1.ListOptions) (result *cephrookiov1.CephBlockPoolRadosNamespaceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(cephblockpoolradosnamespacesResource, cephblockpoolradosnamespacesKind, c.ns, opts), &cephrookiov1.CephBlockPoolRadosNamespaceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &cephrookiov1.CephBlockPoolRadosNamespaceList{ListMeta: obj.(*cephrookiov1.CephBlockPoolRadosNamespaceList).ListMeta} + for _, item := range obj.(*cephrookiov1.CephBlockPoolRadosNamespaceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested cephBlockPoolRadosNamespaces. +func (c *FakeCephBlockPoolRadosNamespaces) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(cephblockpoolradosnamespacesResource, c.ns, opts)) + +} + +// Create takes the representation of a cephBlockPoolRadosNamespace and creates it. Returns the server's representation of the cephBlockPoolRadosNamespace, and an error, if there is any. +func (c *FakeCephBlockPoolRadosNamespaces) Create(ctx context.Context, cephBlockPoolRadosNamespace *cephrookiov1.CephBlockPoolRadosNamespace, opts v1.CreateOptions) (result *cephrookiov1.CephBlockPoolRadosNamespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(cephblockpoolradosnamespacesResource, c.ns, cephBlockPoolRadosNamespace), &cephrookiov1.CephBlockPoolRadosNamespace{}) + + if obj == nil { + return nil, err + } + return obj.(*cephrookiov1.CephBlockPoolRadosNamespace), err +} + +// Update takes the representation of a cephBlockPoolRadosNamespace and updates it. Returns the server's representation of the cephBlockPoolRadosNamespace, and an error, if there is any. +func (c *FakeCephBlockPoolRadosNamespaces) Update(ctx context.Context, cephBlockPoolRadosNamespace *cephrookiov1.CephBlockPoolRadosNamespace, opts v1.UpdateOptions) (result *cephrookiov1.CephBlockPoolRadosNamespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(cephblockpoolradosnamespacesResource, c.ns, cephBlockPoolRadosNamespace), &cephrookiov1.CephBlockPoolRadosNamespace{}) + + if obj == nil { + return nil, err + } + return obj.(*cephrookiov1.CephBlockPoolRadosNamespace), err +} + +// Delete takes name of the cephBlockPoolRadosNamespace and deletes it. Returns an error if one occurs. +func (c *FakeCephBlockPoolRadosNamespaces) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(cephblockpoolradosnamespacesResource, c.ns, name), &cephrookiov1.CephBlockPoolRadosNamespace{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCephBlockPoolRadosNamespaces) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(cephblockpoolradosnamespacesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &cephrookiov1.CephBlockPoolRadosNamespaceList{}) + return err +} + +// Patch applies the patch and returns the patched cephBlockPoolRadosNamespace. 
+func (c *FakeCephBlockPoolRadosNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *cephrookiov1.CephBlockPoolRadosNamespace, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(cephblockpoolradosnamespacesResource, c.ns, name, pt, data, subresources...), &cephrookiov1.CephBlockPoolRadosNamespace{}) + + if obj == nil { + return nil, err + } + return obj.(*cephrookiov1.CephBlockPoolRadosNamespace), err +} diff --git a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go index f7aea4f9e06b9..bfb06dbcde44b 100644 --- a/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go @@ -20,6 +20,8 @@ package v1 type CephBlockPoolExpansion interface{} +type CephBlockPoolRadosNamespaceExpansion interface{} + type CephBucketNotificationExpansion interface{} type CephBucketTopicExpansion interface{} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/cephblockpoolradosnamespace.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephblockpoolradosnamespace.go new file mode 100644 index 0000000000000..a01352f5755ec --- /dev/null +++ b/pkg/client/informers/externalversions/ceph.rook.io/v1/cephblockpoolradosnamespace.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + cephrookiov1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + versioned "github.com/rook/rook/pkg/client/clientset/versioned" + internalinterfaces "github.com/rook/rook/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/rook/rook/pkg/client/listers/ceph.rook.io/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CephBlockPoolRadosNamespaceInformer provides access to a shared informer and lister for +// CephBlockPoolRadosNamespaces. +type CephBlockPoolRadosNamespaceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.CephBlockPoolRadosNamespaceLister +} + +type cephBlockPoolRadosNamespaceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCephBlockPoolRadosNamespaceInformer constructs a new informer for CephBlockPoolRadosNamespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewCephBlockPoolRadosNamespaceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCephBlockPoolRadosNamespaceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCephBlockPoolRadosNamespaceInformer constructs a new informer for CephBlockPoolRadosNamespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCephBlockPoolRadosNamespaceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CephV1().CephBlockPoolRadosNamespaces(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CephV1().CephBlockPoolRadosNamespaces(namespace).Watch(context.TODO(), options) + }, + }, + &cephrookiov1.CephBlockPoolRadosNamespace{}, + resyncPeriod, + indexers, + ) +} + +func (f *cephBlockPoolRadosNamespaceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCephBlockPoolRadosNamespaceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *cephBlockPoolRadosNamespaceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&cephrookiov1.CephBlockPoolRadosNamespace{}, f.defaultInformer) +} + +func (f *cephBlockPoolRadosNamespaceInformer) Lister() v1.CephBlockPoolRadosNamespaceLister { + return v1.NewCephBlockPoolRadosNamespaceLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/ceph.rook.io/v1/interface.go b/pkg/client/informers/externalversions/ceph.rook.io/v1/interface.go index da097ad2659d9..c63b242b44576 100644 --- a/pkg/client/informers/externalversions/ceph.rook.io/v1/interface.go +++ b/pkg/client/informers/externalversions/ceph.rook.io/v1/interface.go @@ -26,6 +26,8 @@ import ( type Interface interface { // CephBlockPools returns a CephBlockPoolInformer. CephBlockPools() CephBlockPoolInformer + // CephBlockPoolRadosNamespaces returns a CephBlockPoolRadosNamespaceInformer. + CephBlockPoolRadosNamespaces() CephBlockPoolRadosNamespaceInformer // CephBucketNotifications returns a CephBucketNotificationInformer. CephBucketNotifications() CephBucketNotificationInformer // CephBucketTopics returns a CephBucketTopicInformer. @@ -72,6 +74,11 @@ func (v *version) CephBlockPools() CephBlockPoolInformer { return &cephBlockPoolInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// CephBlockPoolRadosNamespaces returns a CephBlockPoolRadosNamespaceInformer. +func (v *version) CephBlockPoolRadosNamespaces() CephBlockPoolRadosNamespaceInformer { + return &cephBlockPoolRadosNamespaceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // CephBucketNotifications returns a CephBucketNotificationInformer. 
func (v *version) CephBucketNotifications() CephBucketNotificationInformer { return &cephBucketNotificationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index cac94cb33f2b5..9a0e6bc2872e7 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -55,6 +55,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=ceph.rook.io, Version=v1 case v1.SchemeGroupVersion.WithResource("cephblockpools"): return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephBlockPools().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("cephblockpoolradosnamespaces"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephBlockPoolRadosNamespaces().Informer()}, nil case v1.SchemeGroupVersion.WithResource("cephbucketnotifications"): return &genericInformer{resource: resource.GroupResource(), informer: f.Ceph().V1().CephBucketNotifications().Informer()}, nil case v1.SchemeGroupVersion.WithResource("cephbuckettopics"): diff --git a/pkg/client/listers/ceph.rook.io/v1/cephblockpoolradosnamespace.go b/pkg/client/listers/ceph.rook.io/v1/cephblockpoolradosnamespace.go new file mode 100644 index 0000000000000..22bc41973d40b --- /dev/null +++ b/pkg/client/listers/ceph.rook.io/v1/cephblockpoolradosnamespace.go @@ -0,0 +1,99 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CephBlockPoolRadosNamespaceLister helps list CephBlockPoolRadosNamespaces. +// All objects returned here must be treated as read-only. +type CephBlockPoolRadosNamespaceLister interface { + // List lists all CephBlockPoolRadosNamespaces in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.CephBlockPoolRadosNamespace, err error) + // CephBlockPoolRadosNamespaces returns an object that can list and get CephBlockPoolRadosNamespaces. + CephBlockPoolRadosNamespaces(namespace string) CephBlockPoolRadosNamespaceNamespaceLister + CephBlockPoolRadosNamespaceListerExpansion +} + +// cephBlockPoolRadosNamespaceLister implements the CephBlockPoolRadosNamespaceLister interface. +type cephBlockPoolRadosNamespaceLister struct { + indexer cache.Indexer +} + +// NewCephBlockPoolRadosNamespaceLister returns a new CephBlockPoolRadosNamespaceLister. +func NewCephBlockPoolRadosNamespaceLister(indexer cache.Indexer) CephBlockPoolRadosNamespaceLister { + return &cephBlockPoolRadosNamespaceLister{indexer: indexer} +} + +// List lists all CephBlockPoolRadosNamespaces in the indexer. 
+func (s *cephBlockPoolRadosNamespaceLister) List(selector labels.Selector) (ret []*v1.CephBlockPoolRadosNamespace, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.CephBlockPoolRadosNamespace)) + }) + return ret, err +} + +// CephBlockPoolRadosNamespaces returns an object that can list and get CephBlockPoolRadosNamespaces. +func (s *cephBlockPoolRadosNamespaceLister) CephBlockPoolRadosNamespaces(namespace string) CephBlockPoolRadosNamespaceNamespaceLister { + return cephBlockPoolRadosNamespaceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CephBlockPoolRadosNamespaceNamespaceLister helps list and get CephBlockPoolRadosNamespaces. +// All objects returned here must be treated as read-only. +type CephBlockPoolRadosNamespaceNamespaceLister interface { + // List lists all CephBlockPoolRadosNamespaces in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.CephBlockPoolRadosNamespace, err error) + // Get retrieves the CephBlockPoolRadosNamespace from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.CephBlockPoolRadosNamespace, error) + CephBlockPoolRadosNamespaceNamespaceListerExpansion +} + +// cephBlockPoolRadosNamespaceNamespaceLister implements the CephBlockPoolRadosNamespaceNamespaceLister +// interface. +type cephBlockPoolRadosNamespaceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CephBlockPoolRadosNamespaces in the indexer for a given namespace. +func (s cephBlockPoolRadosNamespaceNamespaceLister) List(selector labels.Selector) (ret []*v1.CephBlockPoolRadosNamespace, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.CephBlockPoolRadosNamespace)) + }) + return ret, err +} + +// Get retrieves the CephBlockPoolRadosNamespace from the indexer for a given namespace and name. +func (s cephBlockPoolRadosNamespaceNamespaceLister) Get(name string) (*v1.CephBlockPoolRadosNamespace, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("cephblockpoolradosnamespace"), name) + } + return obj.(*v1.CephBlockPoolRadosNamespace), nil +} diff --git a/pkg/client/listers/ceph.rook.io/v1/expansion_generated.go b/pkg/client/listers/ceph.rook.io/v1/expansion_generated.go index b31641e019b50..ebeeb58a805fe 100644 --- a/pkg/client/listers/ceph.rook.io/v1/expansion_generated.go +++ b/pkg/client/listers/ceph.rook.io/v1/expansion_generated.go @@ -26,6 +26,14 @@ type CephBlockPoolListerExpansion interface{} // CephBlockPoolNamespaceLister. type CephBlockPoolNamespaceListerExpansion interface{} +// CephBlockPoolRadosNamespaceListerExpansion allows custom methods to be added to +// CephBlockPoolRadosNamespaceLister. +type CephBlockPoolRadosNamespaceListerExpansion interface{} + +// CephBlockPoolRadosNamespaceNamespaceListerExpansion allows custom methods to be added to +// CephBlockPoolRadosNamespaceNamespaceLister. +type CephBlockPoolRadosNamespaceNamespaceListerExpansion interface{} + // CephBucketNotificationListerExpansion allows custom methods to be added to // CephBucketNotificationLister. 
 type CephBucketNotificationListerExpansion interface{}
diff --git a/pkg/daemon/ceph/client/radosnamespace.go b/pkg/daemon/ceph/client/radosnamespace.go
new file mode 100644
index 0000000000000..08388e90b3d1e
--- /dev/null
+++ b/pkg/daemon/ceph/client/radosnamespace.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2022 The Rook Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"encoding/json"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/rook/rook/pkg/clusterd"
+)
+
+// CreateRadosNamespace creates a rados namespace in a pool.
+// poolName is the name of the ceph block pool, the same as the CephBlockPool CR name.
+func CreateRadosNamespace(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, namespaceName string) error {
+	logger.Infof("creating rados namespace %q", namespaceName)
+	// rbd namespace create pool-name/namespace-name
+	args := []string{"namespace", "create", "--pool", poolName, "--namespace", namespaceName}
+	cmd := NewRBDCommand(context, clusterInfo, args)
+	cmd.JsonOutput = false
+	output, err := cmd.Run()
+	if err != nil && !strings.Contains(string(output), "File exists") {
+		return errors.Wrapf(err, "failed to create rados namespace %s/%s. %s", poolName, namespaceName, output)
+	}
+
+	logger.Infof("successfully created rados namespace %s/%s", poolName, namespaceName)
+	return nil
+}
+
+func getRadosNamespaceStatistics(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, namespaceName string) (*PoolStatistics, error) {
+	args := []string{"pool", "stats", "--pool", poolName, "--namespace", namespaceName}
+	cmd := NewRBDCommand(context, clusterInfo, args)
+	cmd.JsonOutput = true
+	output, err := cmd.Run()
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get pool stats. %s", string(output))
+	}
+
+	var poolStats PoolStatistics
+	if err := json.Unmarshal(output, &poolStats); err != nil {
+		return nil, errors.Wrap(err, "failed to unmarshal pool stats response")
+	}
+
+	return &poolStats, nil
+}
+
+func checkForImagesInRadosNamespace(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, namespaceName string) error {
+	var err error
+	logger.Debugf("checking for any images/snapshots present in pool %s/%s", poolName, namespaceName)
+	stats, err := getRadosNamespaceStatistics(context, clusterInfo, poolName, namespaceName)
+	if err != nil {
+		if strings.Contains(err.Error(), "No such file or directory") {
+			return nil
+		}
+		return errors.Wrapf(err, "failed to list images/snapshots in pool %s/%s", poolName, namespaceName)
+	}
+	if stats.Images.Count == 0 && stats.Images.SnapCount == 0 {
+		logger.Infof("no images/snapshots present in pool %s/%s", poolName, namespaceName)
+		return nil
+	}
+
+	return errors.Errorf("pool %s/%s contains images/snapshots", poolName, namespaceName)
+}
+
+// DeleteRadosNamespace deletes a rados namespace.
+func DeleteRadosNamespace(context *clusterd.Context, clusterInfo *ClusterInfo, poolName, namespaceName string) error { + err := checkForImagesInRadosNamespace(context, clusterInfo, poolName, namespaceName) + if err != nil { + return errors.Wrapf(err, "failed to check if pool %s/%s has rbd images", poolName, namespaceName) + } + logger.Infof("deleting rados namespace %s/%s", poolName, namespaceName) + args := []string{"namespace", "remove", "--pool", poolName, "--namespace", namespaceName} + cmd := NewRBDCommand(context, clusterInfo, args) + cmd.JsonOutput = false + output, err := cmd.Run() + if err != nil && !strings.Contains(string(output), "No such file or directory") { + logger.Debugf("failed to delete rados namespace %s/%s. %s", poolName, namespaceName, output) + return err + } + + logger.Infof("successfully deleted rados namespace %s/%s", poolName, namespaceName) + return nil +} diff --git a/pkg/operator/ceph/cluster/dependents.go b/pkg/operator/ceph/cluster/dependents.go index 733c68f577f46..9d36c88dc4e59 100644 --- a/pkg/operator/ceph/cluster/dependents.go +++ b/pkg/operator/ceph/cluster/dependents.go @@ -47,6 +47,7 @@ var ( "CephBucketTopic", "CephBucketNotification", "CephFilesystemSubVolumeGroup", + "CephBlockPoolRadosNamespace", } ) diff --git a/pkg/operator/ceph/controller/predicate.go b/pkg/operator/ceph/controller/predicate.go index 7c2ed2011bd63..aba61f7401e21 100644 --- a/pkg/operator/ceph/controller/predicate.go +++ b/pkg/operator/ceph/controller/predicate.go @@ -409,6 +409,31 @@ func WatchControllerPredicate() predicate.Funcs { return true } logger.Debugf("no change in CR %q", objNew.Name) + + case *cephv1.CephBlockPoolRadosNamespace: + objNew := e.ObjectNew.(*cephv1.CephBlockPoolRadosNamespace) + logger.Debug("update event on CephBlockPoolRadosNamespace CR") + // If the labels "do_not_reconcile" is set on the object, let's not reconcile that request + IsDoNotReconcile := IsDoNotReconcile(objNew.GetLabels()) + if IsDoNotReconcile { + logger.Debugf("object %q matched on update but %q label is set, doing nothing", objNew.Name, DoNotReconcileLabelName) + return false + } + diff := cmp.Diff(objOld.Spec, objNew.Spec, resourceQtyComparer) + if diff != "" { + logger.Infof("CR has changed for %q. 
diff=%s", objNew.Name, diff) + return true + } else if objectToBeDeleted(objOld, objNew) { + logger.Debugf("CR %q is going be deleted", objNew.Name) + return true + } else if objOld.GetGeneration() != objNew.GetGeneration() { + logger.Debugf("skipping resource %q update with unchanged spec", objNew.Name) + } + // Handling upgrades + isUpgrade := isUpgrade(objOld.GetLabels(), objNew.GetLabels()) + if isUpgrade { + return true + } } return false }, diff --git a/pkg/operator/ceph/cr_manager.go b/pkg/operator/ceph/cr_manager.go index 207b901d1f1aa..e76525dc21aae 100644 --- a/pkg/operator/ceph/cr_manager.go +++ b/pkg/operator/ceph/cr_manager.go @@ -44,6 +44,7 @@ import ( "github.com/rook/rook/pkg/operator/ceph/object/zone" "github.com/rook/rook/pkg/operator/ceph/object/zonegroup" "github.com/rook/rook/pkg/operator/ceph/pool" + "github.com/rook/rook/pkg/operator/ceph/pool/radosnamespace" "k8s.io/apimachinery/pkg/runtime" mapiv1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" @@ -108,6 +109,7 @@ var AddToManagerFuncs = []func(manager.Manager, *clusterd.Context, context.Conte topic.Add, notification.Add, subvolumegroup.Add, + radosnamespace.Add, } // AddToManagerOpFunc is a list of functions to add all Controllers to the Manager (entrypoint for diff --git a/pkg/operator/ceph/pool/controller.go b/pkg/operator/ceph/pool/controller.go index 5e46bc9d436e7..a4d54f9178a63 100644 --- a/pkg/operator/ceph/pool/controller.go +++ b/pkg/operator/ceph/pool/controller.go @@ -33,12 +33,14 @@ import ( "github.com/rook/rook/pkg/operator/ceph/config" opcontroller "github.com/rook/rook/pkg/operator/ceph/controller" "github.com/rook/rook/pkg/operator/ceph/csi/peermap" + "github.com/rook/rook/pkg/operator/ceph/reporting" "github.com/rook/rook/pkg/operator/k8sutil" corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -67,6 +69,7 @@ var _ reconcile.Reconciler = &ReconcileCephBlockPool{} // ReconcileCephBlockPool reconciles a CephBlockPool object type ReconcileCephBlockPool struct { client client.Client + recorder record.EventRecorder scheme *runtime.Scheme context *clusterd.Context clusterInfo *cephclient.ClusterInfo @@ -91,6 +94,7 @@ func newReconciler(mgr manager.Manager, context *clusterd.Context, opManagerCont return &ReconcileCephBlockPool{ client: mgr.GetClient(), scheme: mgr.GetScheme(), + recorder: mgr.GetEventRecorderFor("rook-" + controllerName), context: context, blockPoolContexts: make(map[string]*blockPoolHealth), opManagerContext: opManagerContext, @@ -222,13 +226,22 @@ func (r *ReconcileCephBlockPool) reconcile(request reconcile.Request) (reconcile // DELETE: the CR was deleted if !cephBlockPool.GetDeletionTimestamp().IsZero() { + deps, err := CephBlockPoolDependents(r.context, r.clusterInfo, cephBlockPool) + if err != nil { + return reconcile.Result{}, err + } + if !deps.Empty() { + err := reporting.ReportDeletionBlockedDueToDependents(logger, r.client, cephBlockPool, deps) + return opcontroller.WaitForRequeueIfFinalizerBlocked, err + } + reporting.ReportDeletionNotBlockedDueToDependents(logger, r.client, r.recorder, cephBlockPool) // If the ceph block pool is still in the map, we must remove it during CR deletion // We must remove it first otherwise the checker will panic since the 
        // status/info will be nil
         r.cancelMirrorMonitoring(cephBlockPool)
         logger.Infof("deleting pool %q", cephBlockPool.Name)
         poolSpec := cephBlockPool.Spec.ToNamedPoolSpec()
-        err := deletePool(r.context, clusterInfo, &poolSpec)
+        err = deletePool(r.context, clusterInfo, &poolSpec)
         if err != nil {
             return opcontroller.ImmediateRetryResult, errors.Wrapf(err, "failed to delete pool %q. ", cephBlockPool.Name)
         }
diff --git a/pkg/operator/ceph/pool/dependents.go b/pkg/operator/ceph/pool/dependents.go
new file mode 100644
index 0000000000000..56606a408d15a
--- /dev/null
+++ b/pkg/operator/ceph/pool/dependents.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2022 The Rook Authors. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pool
+
+import (
+    "fmt"
+
+    "github.com/pkg/errors"
+    v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+    "github.com/rook/rook/pkg/clusterd"
+    "github.com/rook/rook/pkg/daemon/ceph/client"
+    "github.com/rook/rook/pkg/util/dependents"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CephBlockPoolDependents returns the CephBlockPoolRadosNamespaces that exist in the pool and
+// should block its deletion.
+func CephBlockPoolDependents(clusterdCtx *clusterd.Context, clusterInfo *client.ClusterInfo, blockpool *v1.CephBlockPool) (*dependents.DependentList, error) {
+    nsName := fmt.Sprintf("%s/%s", blockpool.Namespace, blockpool.Name)
+    baseErrMsg := fmt.Sprintf("failed to get dependents of CephBlockPool %q", nsName)
+
+    deps := dependents.NewDependentList()
+
+    // CephBlockPoolRadosNamespaces
+    namespaces, err := clusterdCtx.RookClientset.CephV1().CephBlockPoolRadosNamespaces(blockpool.Namespace).List(clusterInfo.Context, metav1.ListOptions{})
+    if err != nil {
+        return deps, errors.Wrapf(err, "%s. failed to list CephBlockPoolRadosNamespaces for CephBlockPool %q", baseErrMsg, nsName)
+    }
+    for _, namespace := range namespaces.Items {
+        if namespace.Spec.BlockPoolName == blockpool.Name {
+            deps.Add("CephBlockPoolRadosNamespaces", namespace.Name)
+        } else {
+            logger.Debugf("found CephBlockPoolRadosNamespace %q that does not depend on CephBlockPool %q", namespace.Name, nsName)
+        }
+    }
+
+    return deps, nil
+}
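A short sketch of the `dependents.DependentList` contract relied on above (method names are taken from this patch's own usage, not the package's full API):

```go
package example

import (
    "fmt"

    "github.com/rook/rook/pkg/util/dependents"
)

// blockedByNamespace illustrates the deletion-blocking contract: an empty
// list lets CephBlockPool deletion proceed; any entry blocks it.
func blockedByNamespace() {
    deps := dependents.NewDependentList()
    fmt.Println(deps.Empty()) // true: nothing blocks deletion yet

    // A rados namespace whose spec points at the pool becomes a blocking dependent.
    deps.Add("CephBlockPoolRadosNamespaces", "namespace-a")
    fmt.Println(deps.Empty()) // false: deletion is reported as blocked
}
```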
diff --git a/pkg/operator/ceph/pool/dependents_test.go b/pkg/operator/ceph/pool/dependents_test.go
new file mode 100644
index 0000000000000..ad44bc1a338e5
--- /dev/null
+++ b/pkg/operator/ceph/pool/dependents_test.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2022 The Rook Authors. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pool
+
+import (
+    "context"
+    "testing"
+
+    cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+    rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake"
+    "github.com/rook/rook/pkg/clusterd"
+    "github.com/rook/rook/pkg/daemon/ceph/client"
+    "github.com/stretchr/testify/assert"
+    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestCephBlockPoolDependents(t *testing.T) {
+    ctx := context.TODO()
+    scheme := runtime.NewScheme()
+    assert.NoError(t, cephv1.AddToScheme(scheme))
+    ns := "test-ceph-blockpool-dependents"
+    var c *clusterd.Context
+
+    newClusterdCtx := func() *clusterd.Context {
+        return &clusterd.Context{
+            RookClientset: rookclient.NewSimpleClientset(),
+        }
+    }
+
+    clusterInfo := client.AdminTestClusterInfo(ns)
+    // Create objectmeta with the given name in our test namespace
+    meta := func(name string) v1.ObjectMeta {
+        return v1.ObjectMeta{
+            Name:      name,
+            Namespace: ns,
+        }
+    }
+
+    bp := &cephv1.CephBlockPool{
+        ObjectMeta: v1.ObjectMeta{
+            Name:      "replicapool",
+            Namespace: ns,
+        },
+    }
+
+    t.Run("no namespaces", func(t *testing.T) {
+        c = newClusterdCtx()
+        deps, err := CephBlockPoolDependents(c, clusterInfo, bp)
+        assert.NoError(t, err)
+        assert.True(t, deps.Empty())
+    })
+
+    t.Run("one namespace but wrong pool", func(t *testing.T) {
+        otherPool := &cephv1.CephBlockPool{
+            ObjectMeta: v1.ObjectMeta{
+                Name:      "otherns",
+                Namespace: ns,
+            },
+        }
+
+        c = newClusterdCtx()
+        _, err := c.RookClientset.CephV1().CephBlockPoolRadosNamespaces(clusterInfo.Namespace).Create(ctx, &cephv1.CephBlockPoolRadosNamespace{ObjectMeta: meta("namespace1")}, v1.CreateOptions{})
+        assert.NoError(t, err)
+        deps, err := CephBlockPoolDependents(c, clusterInfo, otherPool)
+        assert.NoError(t, err)
+        assert.True(t, deps.Empty())
+    })
+
+    t.Run("one namespace", func(t *testing.T) {
+        c = newClusterdCtx()
+        _, err := c.RookClientset.CephV1().CephBlockPoolRadosNamespaces(clusterInfo.Namespace).Create(ctx, &cephv1.CephBlockPoolRadosNamespace{ObjectMeta: meta("namespace1"), Spec: cephv1.CephBlockPoolRadosNamespaceSpec{
+            BlockPoolName: "replicapool"}}, v1.CreateOptions{})
+        assert.NoError(t, err)
+        deps, err := CephBlockPoolDependents(c, clusterInfo, bp)
+        assert.NoError(t, err)
+        assert.False(t, deps.Empty())
+    })
+}
diff --git a/pkg/operator/ceph/pool/radosnamespace/controller.go b/pkg/operator/ceph/pool/radosnamespace/controller.go
new file mode 100644
index 0000000000000..bffa9663da0ef
--- /dev/null
+++ b/pkg/operator/ceph/pool/radosnamespace/controller.go
@@ -0,0 +1,309 @@
+/*
+Copyright 2022 The Rook Authors. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package radosnamespace manages rados namespaces in rbd pools
+package radosnamespace
+
+import (
+    "context"
+    "fmt"
+    "reflect"
+    "strings"
+    "time"
+
+    cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+    "github.com/rook/rook/pkg/clusterd"
+    cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
+    "github.com/rook/rook/pkg/operator/ceph/cluster/mon"
+    opcontroller "github.com/rook/rook/pkg/operator/ceph/controller"
+    "github.com/rook/rook/pkg/operator/ceph/csi"
+    "github.com/rook/rook/pkg/operator/ceph/reporting"
+    "github.com/rook/rook/pkg/operator/k8sutil"
+
+    "github.com/coreos/pkg/capnslog"
+    "github.com/pkg/errors"
+    kerrors "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/types"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/controller"
+    "sigs.k8s.io/controller-runtime/pkg/handler"
+    "sigs.k8s.io/controller-runtime/pkg/manager"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+    "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+const (
+    controllerName = "rbd-pool-rados-namespace-controller"
+)
+
+var logger = capnslog.NewPackageLogger("github.com/rook/rook", controllerName)
+
+var poolNamespace = reflect.TypeOf(cephv1.CephBlockPoolRadosNamespace{}).Name()
+
+// Sets the type meta for the controller main object
+var controllerTypeMeta = metav1.TypeMeta{
+    Kind:       poolNamespace,
+    APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version),
+}
+
+// ReconcileCephBlockPoolRadosNamespace reconciles a CephBlockPoolRadosNamespace object
+type ReconcileCephBlockPoolRadosNamespace struct {
+    client           client.Client
+    scheme           *runtime.Scheme
+    context          *clusterd.Context
+    clusterInfo      *cephclient.ClusterInfo
+    opManagerContext context.Context
+}
+
+// Add creates a new CephBlockPoolRadosNamespace Controller and adds it to the
+// Manager. The Manager will set fields on the Controller and Start it when the
+// Manager is Started.
+func Add(mgr manager.Manager, context *clusterd.Context, opManagerContext context.Context, opConfig opcontroller.OperatorConfig) error {
+    return add(mgr, newReconciler(mgr, context, opManagerContext))
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager, context *clusterd.Context, opManagerContext context.Context) reconcile.Reconciler {
+    return &ReconcileCephBlockPoolRadosNamespace{
+        client:           mgr.GetClient(),
+        scheme:           mgr.GetScheme(),
+        context:          context,
+        opManagerContext: opManagerContext,
+    }
+}
+
+func add(mgr manager.Manager, r reconcile.Reconciler) error {
+    // Create a new controller
+    c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r})
+    if err != nil {
+        return err
+    }
+    logger.Info("successfully started")
+
+    // Watch for changes on the CephBlockPoolRadosNamespace CRD object
+    err = c.Watch(&source.Kind{Type: &cephv1.CephBlockPoolRadosNamespace{TypeMeta: controllerTypeMeta}}, &handler.EnqueueRequestForObject{}, opcontroller.WatchControllerPredicate())
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+// Reconcile reads the state of the cluster for a CephBlockPoolRadosNamespace
+// object and makes changes based on the state read and what is in the
+// CephBlockPoolRadosNamespace.Spec. The Controller will requeue the Request to be
+// processed again if the returned error is non-nil or Result.Requeue is true,
+// otherwise upon completion it will remove the work from the queue.
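+//
+// The flow below: fetch the CR, ensure a finalizer, initialize status, wait
+// for the CephCluster to be ready, load cluster info, then either clean up
+// the rados namespace and its CSI config entry on CR deletion, or create the
+// namespace and publish the CSI cluster config, updating status at the end.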
+func (r *ReconcileCephBlockPoolRadosNamespace) Reconcile(context context.Context, request reconcile.Request) (reconcile.Result, error) { + // workaround because the rook logging mechanism is not compatible with the controller-runtime logging interface + reconcileResponse, err := r.reconcile(request) + if err != nil { + logger.Errorf("failed to reconcile %v", err) + } + + return reconcileResponse, err +} + +func (r *ReconcileCephBlockPoolRadosNamespace) reconcile(request reconcile.Request) (reconcile.Result, error) { + // Fetch the CephBlockPoolRadosNamespace instance + cephBlockPoolRadosNamespace := &cephv1.CephBlockPoolRadosNamespace{} + err := r.client.Get(r.opManagerContext, request.NamespacedName, cephBlockPoolRadosNamespace) + if err != nil { + if kerrors.IsNotFound(err) { + logger.Debug("cephBlockPoolRadosNamespace resource not found. Ignoring since object must be deleted.") + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + return reconcile.Result{}, errors.Wrap(err, "failed to get cephBlockPoolRadosNamespace") + } + + // Set a finalizer so we can do cleanup before the object goes away + err = opcontroller.AddFinalizerIfNotPresent(r.opManagerContext, r.client, cephBlockPoolRadosNamespace) + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to add finalizer") + } + + // The CR was just created, initializing status fields + if cephBlockPoolRadosNamespace.Status == nil { + r.updateStatus(r.client, request.NamespacedName, cephv1.ConditionProgressing) + } + + // Make sure a CephCluster is present otherwise do nothing + cephCluster, isReadyToReconcile, cephClusterExists, reconcileResponse := opcontroller.IsReadyToReconcile(r.opManagerContext, r.client, request.NamespacedName, controllerName) + if !isReadyToReconcile { + // This handles the case where the Ceph Cluster is gone and we want to delete that CR + // We skip the deleteRadosNamespace() function since everything is gone already + // + // Also, only remove the finalizer if the CephCluster is gone + // If not, we should wait for it to be ready + // This handles the case where the operator is not ready to accept Ceph command but the cluster exists + if !cephBlockPoolRadosNamespace.GetDeletionTimestamp().IsZero() && !cephClusterExists { + // Remove finalizer + err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephBlockPoolRadosNamespace) + if err != nil { + return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to remove finalizer") + } + + // Return and do not requeue. Successful deletion. 
+            return reconcile.Result{}, nil
+        }
+        return reconcileResponse, nil
+    }
+
+    // Populate clusterInfo during each reconcile
+    r.clusterInfo, _, _, err = mon.LoadClusterInfo(r.context, r.opManagerContext, request.NamespacedName.Namespace)
+    if err != nil {
+        return reconcile.Result{}, errors.Wrap(err, "failed to populate cluster info")
+    }
+    r.clusterInfo.Context = r.opManagerContext
+
+    // DELETE: the CR was deleted
+    if !cephBlockPoolRadosNamespace.GetDeletionTimestamp().IsZero() {
+        logger.Debugf("delete ceph blockpool rados namespace %q", cephBlockPoolRadosNamespace.Name)
+        // On external clusters we don't delete the rados namespace; it has to be deleted manually
+        if cephCluster.Spec.External.Enable {
+            logger.Warning("external rados namespace deletion is not supported, delete it manually")
+        } else {
+            err := r.deleteRadosNamespace(cephBlockPoolRadosNamespace)
+            if err != nil {
+                if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) {
+                    logger.Info(opcontroller.OperatorNotInitializedMessage)
+                    return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil
+                }
+                return reconcile.Result{}, errors.Wrapf(err, "failed to delete ceph blockpool rados namespace %q", cephBlockPoolRadosNamespace.Name)
+            }
+        }
+        err = csi.SaveClusterConfig(r.context.Clientset, buildClusterID(cephBlockPoolRadosNamespace), r.clusterInfo, nil)
+        if err != nil {
+            return reconcile.Result{}, errors.Wrap(err, "failed to save cluster config")
+        }
+        // Remove finalizer
+        err = opcontroller.RemoveFinalizer(r.opManagerContext, r.client, cephBlockPoolRadosNamespace)
+        if err != nil {
+            return reconcile.Result{}, errors.Wrap(err, "failed to remove finalizer")
+        }
+
+        // Return and do not requeue. Successful deletion.
+        return reconcile.Result{}, nil
+    }
+
+    if !cephCluster.Spec.External.Enable {
+        // Build the NamespacedName to fetch the CephBlockPool and make sure it exists;
+        // if not, we cannot create the rados namespace
+        cephBlockPool := &cephv1.CephBlockPool{}
+        cephBlockPoolRadosNamespacedName := types.NamespacedName{Name: cephBlockPoolRadosNamespace.Spec.BlockPoolName, Namespace: request.Namespace}
+        err = r.client.Get(r.opManagerContext, cephBlockPoolRadosNamespacedName, cephBlockPool)
+        if err != nil {
+            if kerrors.IsNotFound(err) {
+                return reconcile.Result{}, errors.Wrapf(err, "failed to fetch ceph blockpool %q, cannot create rados namespace %q", cephBlockPoolRadosNamespace.Spec.BlockPoolName, cephBlockPoolRadosNamespace.Name)
+            }
+            // Error reading the object - requeue the request.
+            return reconcile.Result{}, errors.Wrapf(err, "failed to get cephBlockPool %q", cephBlockPoolRadosNamespace.Spec.BlockPoolName)
+        }
+
+        // If the cephBlockPool is not ready to accept commands, we should wait for it to be ready
+        if cephBlockPool.Status == nil || cephBlockPool.Status.Phase != cephv1.ConditionReady {
+            // We know the CR is present, so it should be a matter of seconds for it to become ready
+            logger.Debugf("ceph blockpool %q is not ready yet, requeuing rados namespace %q", cephBlockPoolRadosNamespace.Spec.BlockPoolName, cephBlockPoolRadosNamespace.Name)
+            return reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second}, nil
+        }
+    }
+    if cephCluster.Spec.External.Enable {
+        logger.Debug("external rados namespace creation is not supported; create it manually and the controller will assume it's there")
+    } else {
+        // Create or Update rados namespace
+        err = r.createOrUpdateRadosNamespace(cephBlockPoolRadosNamespace)
+        if err != nil {
+            if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) {
+                logger.Info(opcontroller.OperatorNotInitializedMessage)
+                return opcontroller.WaitForRequeueIfOperatorNotInitialized, nil
+            }
+            r.updateStatus(r.client, request.NamespacedName, cephv1.ConditionFailure)
+            return reconcile.Result{}, errors.Wrapf(err, "failed to create or update ceph pool rados namespace %q", cephBlockPoolRadosNamespace.Name)
+        }
+    }
+    // Update CSI config map
+    // If the mon endpoints change, the mon health check go routine will take care of updating the
+    // config map, so no special care is needed in this controller
+    csiClusterConfigEntry := csi.CsiClusterConfigEntry{
+        Monitors:       csi.MonEndpoints(r.clusterInfo.Monitors),
+        RadosNamespace: cephBlockPoolRadosNamespace.Name,
+    }
+    err = csi.SaveClusterConfig(r.context.Clientset, buildClusterID(cephBlockPoolRadosNamespace), r.clusterInfo, &csiClusterConfigEntry)
+    if err != nil {
+        return reconcile.Result{}, errors.Wrap(err, "failed to save cluster config")
+    }
+    // Success!
Let's update the status
+    if cephCluster.Spec.External.Enable {
+        r.updateStatus(r.client, request.NamespacedName, cephv1.ConditionConnected)
+    } else {
+        r.updateStatus(r.client, request.NamespacedName, cephv1.ConditionReady)
+    }
+
+    // Return and do not requeue
+    logger.Debug("done reconciling")
+    return reconcile.Result{}, nil
+}
+
+// createOrUpdateRadosNamespace creates the ceph blockpool rados namespace;
+// the underlying `rbd namespace create` call is treated as idempotent by the client helper
+func (r *ReconcileCephBlockPoolRadosNamespace) createOrUpdateRadosNamespace(cephBlockPoolRadosNamespace *cephv1.CephBlockPoolRadosNamespace) error {
+    logger.Infof("creating ceph blockpool rados namespace %s in namespace %s", cephBlockPoolRadosNamespace.Name, cephBlockPoolRadosNamespace.Namespace)
+
+    err := cephclient.CreateRadosNamespace(r.context, r.clusterInfo, cephBlockPoolRadosNamespace.Spec.BlockPoolName, cephBlockPoolRadosNamespace.Name)
+    if err != nil {
+        return errors.Wrapf(err, "failed to create ceph blockpool rados namespace %q", cephBlockPoolRadosNamespace.Name)
+    }
+
+    return nil
+}
+
+// deleteRadosNamespace deletes the ceph blockpool rados namespace
+func (r *ReconcileCephBlockPoolRadosNamespace) deleteRadosNamespace(cephBlockPoolRadosNamespace *cephv1.CephBlockPoolRadosNamespace) error {
+    logger.Infof("deleting ceph blockpool rados namespace object %q", cephBlockPoolRadosNamespace.Name)
+    if err := cephclient.DeleteRadosNamespace(r.context, r.clusterInfo, cephBlockPoolRadosNamespace.Spec.BlockPoolName, cephBlockPoolRadosNamespace.Name); err != nil {
+        return errors.Wrapf(err, "failed to delete ceph blockpool rados namespace %q", cephBlockPoolRadosNamespace.Name)
+    }
+
+    logger.Infof("deleted ceph blockpool rados namespace %q", cephBlockPoolRadosNamespace.Name)
+    return nil
+}
+
+// updateStatus updates an object with a given status
+func (r *ReconcileCephBlockPoolRadosNamespace) updateStatus(client client.Client, name types.NamespacedName, status cephv1.ConditionType) {
+    cephBlockPoolRadosNamespace := &cephv1.CephBlockPoolRadosNamespace{}
+    if err := client.Get(r.opManagerContext, name, cephBlockPoolRadosNamespace); err != nil {
+        if kerrors.IsNotFound(err) {
+            logger.Debug("CephBlockPoolRadosNamespace resource not found. Ignoring since object must be deleted.")
+            return
+        }
+        logger.Warningf("failed to retrieve ceph blockpool rados namespace %q to update status to %q. %v", name, status, err)
+        return
+    }
+    if cephBlockPoolRadosNamespace.Status == nil {
+        cephBlockPoolRadosNamespace.Status = &cephv1.CephBlockPoolRadosNamespaceStatus{}
+    }
+
+    cephBlockPoolRadosNamespace.Status.Phase = status
+    cephBlockPoolRadosNamespace.Status.Info = map[string]string{"clusterID": buildClusterID(cephBlockPoolRadosNamespace)}
+    if err := reporting.UpdateStatus(client, cephBlockPoolRadosNamespace); err != nil {
+        logger.Errorf("failed to set ceph blockpool rados namespace %q status to %q. %v", name, status, err)
+        return
+    }
+    logger.Debugf("ceph blockpool rados namespace %q status updated to %q", name, status)
+}
+
+// buildClusterID returns a stable hash of "<cluster namespace>-<pool>-block-<name>",
+// which is used as the CSI clusterID for this rados namespace
+func buildClusterID(cephBlockPoolRadosNamespace *cephv1.CephBlockPoolRadosNamespace) string {
+    clusterID := fmt.Sprintf("%s-%s-block-%s", cephBlockPoolRadosNamespace.Namespace, cephBlockPoolRadosNamespace.Spec.BlockPoolName, cephBlockPoolRadosNamespace.Name)
+    return k8sutil.Hash(clusterID)
+}
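For reference, the CSI clusterID derivation can be reproduced standalone; a sketch mirroring `buildClusterID` above, with the example names from the docs:

```go
package example

import (
    "fmt"

    "github.com/rook/rook/pkg/operator/k8sutil"
)

// csiClusterID hashes "<cluster namespace>-<pool>-block-<name>" exactly as
// buildClusterID does; the result is a stable, fixed-length ID (see
// Test_buildClusterID below) that is also surfaced in Status.Info["clusterID"].
func csiClusterID() string {
    return k8sutil.Hash(fmt.Sprintf("%s-%s-block-%s", "rook-ceph", "replicapool", "namespace-a"))
}
```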
diff --git a/pkg/operator/ceph/pool/radosnamespace/controller_test.go b/pkg/operator/ceph/pool/radosnamespace/controller_test.go
new file mode 100644
index 0000000000000..2d5055c0ba83b
--- /dev/null
+++ b/pkg/operator/ceph/pool/radosnamespace/controller_test.go
@@ -0,0 +1,288 @@
+/*
+Copyright 2022 The Rook Authors. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package radosnamespace
+
+import (
+    "context"
+    "os"
+    "testing"
+
+    cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+    rookclient "github.com/rook/rook/pkg/client/clientset/versioned/fake"
+    "github.com/rook/rook/pkg/client/clientset/versioned/scheme"
+    "github.com/rook/rook/pkg/clusterd"
+    "github.com/rook/rook/pkg/operator/ceph/csi"
+    "github.com/rook/rook/pkg/operator/k8sutil"
+    testop "github.com/rook/rook/pkg/operator/test"
+    exectest "github.com/rook/rook/pkg/util/exec/test"
+
+    "github.com/coreos/pkg/capnslog"
+    "github.com/stretchr/testify/assert"
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/types"
+    "sigs.k8s.io/controller-runtime/pkg/client/fake"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+func TestCephBlockPoolRadosNamespaceController(t *testing.T) {
+    ctx := context.TODO()
+    // Set DEBUG logging
+    capnslog.SetGlobalLogLevel(capnslog.DEBUG)
+    os.Setenv("ROOK_LOG_LEVEL", "DEBUG")
+
+    var (
+        name      = "namespace-a"
+        namespace = "rook-ceph"
+    )
+
+    // A cephBlockPoolRadosNamespace resource with metadata and spec.
+    cephBlockPoolRadosNamespace := &cephv1.CephBlockPoolRadosNamespace{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      name,
+            Namespace: namespace,
+            UID:       types.UID("c47cac40-9bee-4d52-823b-ccd803ba5bfe"),
+        },
+        Spec: cephv1.CephBlockPoolRadosNamespaceSpec{
+            BlockPoolName: namespace,
+        },
+        Status: &cephv1.CephBlockPoolRadosNamespaceStatus{
+            Phase: "",
+        },
+    }
+
+    // Objects to track in the fake client.
+    object := []runtime.Object{
+        cephBlockPoolRadosNamespace,
+    }
+
+    executor := &exectest.MockExecutor{
+        MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
+            if args[0] == "status" {
+                return `{"fsid":"c47cac40-9bee-4d52-823b-ccd803ba5bfe","health":{"checks":{},"status":"HEALTH_ERR"},"pgmap":{"num_pgs":100,"pgs_by_state":[{"state_name":"active+clean","count":100}]}}`, nil
+            }
+
+            return "", nil
+        },
+    }
+    c := &clusterd.Context{
+        Executor:      executor,
+        Clientset:     testop.New(t, 1),
+        RookClientset: rookclient.NewSimpleClientset(),
+    }
+
+    // Register operator types with the runtime scheme.
+    s := scheme.Scheme
+    s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephClient{}, &cephv1.CephClusterList{})
+
+    // Create a fake client to mock API calls.
+    cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
+
+    // Create a ReconcileCephBlockPoolRadosNamespace object with the scheme and fake client.
+    r := &ReconcileCephBlockPoolRadosNamespace{
+        client:           cl,
+        scheme:           s,
+        context:          c,
+        opManagerContext: ctx,
+    }
+
+    // Mock request to simulate Reconcile() being called on an event for a
+    // watched resource.
+ req := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: name, + Namespace: namespace, + }, + } + + cephCluster := &cephv1.CephCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + Namespace: namespace, + }, + Status: cephv1.ClusterStatus{ + Phase: "", + CephVersion: &cephv1.ClusterVersion{ + Version: "14.2.9-0", + }, + CephStatus: &cephv1.CephStatus{ + Health: "", + }, + }, + } + s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephCluster{}, &cephv1.CephClusterList{}) + + t.Run("error - no ceph cluster", func(t *testing.T) { + res, err := r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.True(t, res.Requeue) + }) + + t.Run("error - ceph cluster not ready", func(t *testing.T) { + object = append(object, cephCluster) + // Create a fake client to mock API calls. + cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build() + // Create a ReconcileCephBlockPoolRadosNamespace object with the scheme and fake client. + r = &ReconcileCephBlockPoolRadosNamespace{client: cl, scheme: s, context: c, opManagerContext: context.TODO()} + res, err := r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.True(t, res.Requeue) + + cephCluster.Status.Phase = cephv1.ConditionReady + cephCluster.Status.CephStatus.Health = "HEALTH_OK" + }) + + cephBlockPool := &cephv1.CephBlockPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + Namespace: namespace, + }, + Status: &cephv1.CephBlockPoolStatus{ + Phase: "", + }, + } + + t.Run("error - ceph blockpool not ready", func(t *testing.T) { + res, err := r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.True(t, res.Requeue) + cephBlockPool.Status.Phase = cephv1.ConditionReady + }) + + t.Run("success - ceph cluster ready, block pool rados namespace created", func(t *testing.T) { + // Mock clusterInfo + secrets := map[string][]byte{ + "fsid": []byte(name), + "mon-secret": []byte("monsecret"), + "admin-secret": []byte("adminsecret"), + } + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rook-ceph-mon", + Namespace: namespace, + }, + Data: secrets, + Type: k8sutil.RookType, + } + _, err := c.Clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) + assert.NoError(t, err) + objects := []runtime.Object{ + cephBlockPoolRadosNamespace, + cephCluster, + cephBlockPool, + } + // Create a fake client to mock API calls. + cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objects...).Build() + c.Client = cl + + executor = &exectest.MockExecutor{ + MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) { + if args[0] == "namespace" && args[1] == "create" { + return "", nil + } + + return "", nil + }, + } + c.Executor = executor + + s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephBlockPoolList{}) + // Create a ReconcileCephBlockPoolRadosNamespace object with the scheme and fake client. 
+ r = &ReconcileCephBlockPoolRadosNamespace{ + client: cl, + scheme: s, + context: c, + opManagerContext: context.TODO(), + } + + // Enable CSI + csi.EnableRBD = true + os.Setenv("POD_NAMESPACE", namespace) + // Create CSI config map + ownerRef := &metav1.OwnerReference{} + ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(ownerRef, "") + err = csi.CreateCsiConfigMap(namespace, c.Clientset, ownerInfo) + assert.NoError(t, err) + + res, err := r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.False(t, res.Requeue) + + err = r.client.Get(context.TODO(), req.NamespacedName, cephBlockPoolRadosNamespace) + assert.NoError(t, err) + assert.Equal(t, cephv1.ConditionReady, cephBlockPoolRadosNamespace.Status.Phase) + + // test that csi configmap is created + cm, err := c.Clientset.CoreV1().ConfigMaps(namespace).Get(ctx, csi.ConfigName, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotEmpty(t, cm.Data[csi.ConfigKey]) + assert.Contains(t, cm.Data[csi.ConfigKey], "clusterID") + assert.Contains(t, cm.Data[csi.ConfigKey], name) + err = c.Clientset.CoreV1().ConfigMaps(namespace).Delete(ctx, csi.ConfigName, metav1.DeleteOptions{}) + assert.NoError(t, err) + }) + + t.Run("success - external mode csi config is updated", func(t *testing.T) { + cephCluster.Spec.External.Enable = true + objects := []runtime.Object{ + cephBlockPoolRadosNamespace, + cephCluster, + } + // Create a fake client to mock API calls. + cl = fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objects...).Build() + c.Client = cl + + s.AddKnownTypes(cephv1.SchemeGroupVersion, &cephv1.CephBlockPoolList{}) + // Create a ReconcileCephBlockPoolRadosNamespace object with the scheme and fake client. + r = &ReconcileCephBlockPoolRadosNamespace{ + client: cl, + scheme: s, + context: c, + opManagerContext: ctx, + } + + // Enable CSI + csi.EnableRBD = true + os.Setenv("POD_NAMESPACE", namespace) + // Create CSI config map + ownerRef := &metav1.OwnerReference{} + ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(ownerRef, "") + err := csi.CreateCsiConfigMap(namespace, c.Clientset, ownerInfo) + assert.NoError(t, err) + + res, err := r.Reconcile(ctx, req) + assert.NoError(t, err) + assert.False(t, res.Requeue) + + err = r.client.Get(ctx, req.NamespacedName, cephBlockPoolRadosNamespace) + assert.NoError(t, err) + assert.Equal(t, cephv1.ConditionConnected, cephBlockPoolRadosNamespace.Status.Phase) + assert.NotEmpty(t, cephBlockPoolRadosNamespace.Status.Info["clusterID"]) + + // test that csi configmap is created + cm, err := c.Clientset.CoreV1().ConfigMaps(namespace).Get(ctx, csi.ConfigName, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotEmpty(t, cm.Data[csi.ConfigKey]) + assert.Contains(t, cm.Data[csi.ConfigKey], "clusterID") + assert.Contains(t, cm.Data[csi.ConfigKey], name) + }) +} + +func Test_buildClusterID(t *testing.T) { + longName := "foooooooooooooooooooooooooooooooooooooooooooo" + cephBlockPoolRadosNamespace := &cephv1.CephBlockPoolRadosNamespace{ObjectMeta: metav1.ObjectMeta{Namespace: "rook-ceph", Name: longName}, Spec: cephv1.CephBlockPoolRadosNamespaceSpec{BlockPoolName: "replicapool"}} + clusterID := buildClusterID(cephBlockPoolRadosNamespace) + assert.Equal(t, "2a74e5201e6ff9d15916ce2109c4f868", clusterID) +} diff --git a/tests/scripts/github-action-helper.sh b/tests/scripts/github-action-helper.sh index 972ef337190bd..4743c711c9a3a 100755 --- a/tests/scripts/github-action-helper.sh +++ b/tests/scripts/github-action-helper.sh @@ -217,6 +217,7 @@ function deploy_cluster() { kubectl create -f filesystem-mirror.yaml 
kubectl create -f nfs-test.yaml kubectl create -f subvolumegroup.yaml + kubectl create -f radosnamespace.yaml deploy_manifest_with_local_build toolbox.yaml }
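The generated clientset added by this patch can also create the new CR programmatically. A minimal sketch, assuming in-cluster config and the example names from `deploy/examples/radosnamespace.yaml`:

```go
package main

import (
    "context"
    "log"

    cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
    rookclient "github.com/rook/rook/pkg/client/clientset/versioned"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/rest"
)

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        log.Fatal(err)
    }
    clientset, err := rookclient.NewForConfig(cfg)
    if err != nil {
        log.Fatal(err)
    }

    // Equivalent to `kubectl create -f deploy/examples/radosnamespace.yaml`:
    // a rados namespace "namespace-a" inside the CephBlockPool "replicapool".
    ns := &cephv1.CephBlockPoolRadosNamespace{
        ObjectMeta: metav1.ObjectMeta{Name: "namespace-a", Namespace: "rook-ceph"},
        Spec:       cephv1.CephBlockPoolRadosNamespaceSpec{BlockPoolName: "replicapool"},
    }
    if _, err := clientset.CephV1().CephBlockPoolRadosNamespaces("rook-ceph").Create(context.TODO(), ns, metav1.CreateOptions{}); err != nil {
        log.Fatal(err)
    }
    log.Println("created CephBlockPoolRadosNamespace rook-ceph/namespace-a")
}
```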